PyTorch
Functions.h
#pragma once

// @generated by torchgen/gen.py from Functions.h

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
  meaning the file will need to be re-compiled every time an operator \
  is changed or added. Consider if your change would be better placed in \
  another file, or if a more specific header might achieve the same goal. \
  See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
  file will need to be re-compiled every time an operator is changed or added. \
  Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
  see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated header files include the definitions of
// every single operator in PyTorch. Unfortunately this means that every
// time an operator signature is updated or changed in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies and improve incremental
// build times for all PyTorch developers, these headers are split
// into per-operator headers in the `ATen/ops` folder. This limits
// incremental builds to only changes to methods of `Tensor`, or files
// that use the specific operator being changed. With `at::sum` as an
// example, you should include
//
//   <ATen/ops/sum.h>               // instead of ATen/Functions.h
//   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
//   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
//
// However, even if you're careful to use these headers in your own
// code, `Functions.h` might still be included indirectly through
// another header without you realising. To avoid this, you can add
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way, any time one of the
// non-specific headers is included, the compiler will error out.
//
// Also, be aware that the `ops` headers are not available in all build
// configurations (namely fb-internal), so you must guard these
// includes with `#ifdef AT_PER_OPERATOR_HEADERS`, e.g.
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
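//
// Putting these pieces together, a translation unit that only needs
// `at::sum` might look like the following sketch (the `total` helper
// is hypothetical, for illustration only):
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//   #include <ATen/core/Tensor.h>
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
//
//   at::Tensor total(const at::Tensor& t) {
//     return at::sum(t);
//   }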

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <c10/core/SymInt.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/util/OptionalArrayRef.h>

#include <ATen/ops/from_blob.h>
#include <ATen/ops/tensor.h>

#include <ATen/Operators.h>

namespace at {

// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Byte(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Byte::call(self, non_blocking);
}

// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Char(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Char::call(self, non_blocking);
}

// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Double::call(self, non_blocking);
}

// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Float::call(self, non_blocking);
}

// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Int(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Int::call(self, non_blocking);
}

// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Long(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Long::call(self, non_blocking);
}

// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Short::call(self, non_blocking);
}

// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Half(const at::Tensor & self, bool non_blocking=false) {
    return at::_ops::_cast_Half::call(self, non_blocking);
}

// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
inline at::Tensor _make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
    return at::_ops::_make_dual::call(primal, tangent, level);
}

// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
inline ::std::tuple<at::Tensor,at::Tensor> _unpack_dual(const at::Tensor & dual, int64_t level) {
    return at::_ops::_unpack_dual::call(dual, level);
}

// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
inline at::Tensor _new_zeros_with_same_feature_meta(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
    return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
}

// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
inline bool _has_same_storage_numel(const at::Tensor & self, const at::Tensor & other) {
    return at::_ops::_has_same_storage_numel::call(self, other);
}

// aten::align_tensors(Tensor[] tensors) -> Tensor[]
inline ::std::vector<at::Tensor> align_tensors(at::TensorList tensors) {
    return at::_ops::align_tensors::call(tensors);
}

// aten::_assert_async(Tensor self) -> ()
inline void _assert_async(const at::Tensor & self) {
    return at::_ops::_assert_async::call(self);
}

// aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()
inline void _assert_tensor_metadata(const at::Tensor & a, at::OptionalIntArrayRef size=c10::nullopt, at::OptionalIntArrayRef stride=c10::nullopt, c10::optional<at::ScalarType> dtype=c10::nullopt) {
    return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype);
}

// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
inline bool _use_cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank) {
    return at::_ops::_use_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
inline bool _use_cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank) {
    return at::_ops::_use_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank);
}

// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
    return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
    return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
}

// aten::_use_cudnn_rnn_flatten_weight() -> bool
inline bool _use_cudnn_rnn_flatten_weight() {
    return at::_ops::_use_cudnn_rnn_flatten_weight::call();
}

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
inline at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  }
}

// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
inline at::Tensor _cudnn_rnn_flatten_weight_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  }
}
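
// The `at::symint::` overloads above are a generated pattern: each
// operator with symbolic-size arguments is exposed once with concrete
// `int64_t` sizes and once with `c10::SymInt` sizes, and the explicit
// template argument selects between them at compile time. A minimal
// calling sketch (the argument values are placeholders, not a working
// cuDNN configuration):
//
//   // concrete integer sizes
//   auto w1 = at::symint::_cudnn_rnn_flatten_weight<int64_t>(
//       weights, /*weight_stride0=*/4, /*input_size=*/10, /*mode=*/2,
//       /*hidden_size=*/20, /*proj_size=*/0, /*num_layers=*/1,
//       /*batch_first=*/true, /*bidirectional=*/false);
//
//   // symbolic sizes, e.g. while tracing with dynamic shapes
//   auto w2 = at::symint::_cudnn_rnn_flatten_weight<c10::SymInt>(
//       weights, 4, c10::SymInt(10), 2, c10::SymInt(20), c10::SymInt(0),
//       1, true, false);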

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
    return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
    return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state);
  }
}

// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
    return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
    return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
  }
}

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask);
  }
}

// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
  }
}

// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) {
    return at::_ops::_cudnn_init_dropout_state::call(dropout, train, dropout_seed, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::_cudnn_init_dropout_state::call(dropout, train, dropout_seed, dtype, layout, device, pin_memory);
}

// aten::_debug_has_internal_overlap(Tensor self) -> int
inline int64_t _debug_has_internal_overlap(const at::Tensor & self) {
    return at::_ops::_debug_has_internal_overlap::call(self);
}

// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> _fused_dropout(const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
    return at::_ops::_fused_dropout::call(self, p, generator);
}

// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
inline at::Tensor _masked_scale(const at::Tensor & self, const at::Tensor & mask, double scale) {
    return at::_ops::_masked_scale::call(self, mask, scale);
}

// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> native_dropout(const at::Tensor & input, double p, c10::optional<bool> train) {
    return at::_ops::native_dropout::call(input, p, train);
}

// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
inline at::Tensor native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
    return at::_ops::native_dropout_backward::call(grad_output, mask, scale);
}

// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
    return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype);
}

// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
inline at::Tensor & _sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
    return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated);
}

// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
inline at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
    return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension);
}

// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
inline at::Tensor & _sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
    return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
}

// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
inline at::Tensor _reshape_from_tensor(const at::Tensor & self, const at::Tensor & shape) {
    return at::_ops::_reshape_from_tensor::call(self, shape);
}

// aten::_shape_as_tensor(Tensor self) -> Tensor
inline at::Tensor _shape_as_tensor(const at::Tensor & self) {
    return at::_ops::_shape_as_tensor::call(self);
}

// aten::dropout(Tensor input, float p, bool train) -> Tensor
inline at::Tensor dropout(const at::Tensor & input, double p, bool train) {
    return at::_ops::dropout::call(input, p, train);
}

// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
inline at::Tensor & dropout_(at::Tensor & self, double p, bool train) {
    return at::_ops::dropout_::call(self, p, train);
}

// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
inline at::Tensor feature_dropout(const at::Tensor & input, double p, bool train) {
    return at::_ops::feature_dropout::call(input, p, train);
}

// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
inline at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train) {
    return at::_ops::feature_dropout_::call(self, p, train);
}

// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
inline at::Tensor alpha_dropout(const at::Tensor & input, double p, bool train) {
    return at::_ops::alpha_dropout::call(input, p, train);
}

// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
inline at::Tensor & alpha_dropout_(at::Tensor & self, double p, bool train) {
    return at::_ops::alpha_dropout_::call(self, p, train);
}

// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
inline at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train) {
    return at::_ops::feature_alpha_dropout::call(input, p, train);
}

// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
inline at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train) {
    return at::_ops::feature_alpha_dropout_::call(self, p, train);
}
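
// All of the dropout variants above share the (input, p, train) calling
// convention: `p` is the drop probability, and passing `train=false`
// turns the op into an identity. A minimal sketch (values are
// illustrative only):
//
//   at::Tensor x = at::ones({2, 3});
//   at::Tensor y = at::dropout(x, /*p=*/0.5, /*train=*/true);
//   at::Tensor z = at::dropout(x, /*p=*/0.5, /*train=*/false);  // z == x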

// aten::abs(Tensor self) -> Tensor
inline at::Tensor abs(const at::Tensor & self) {
    return at::_ops::abs::call(self);
}

// aten::abs_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & abs_(at::Tensor & self) {
    return at::_ops::abs_::call(self);
}

// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::abs_out::call(self, out);
}
// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::abs_out::call(self, out);
}
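
// Each `*_out`/`*_outf` pair in this header dispatches to the same
// underlying op and differs only in argument order: `*_out` takes `out`
// first (which lets trailing arguments keep their defaults), while
// `*_outf` takes `out` last, matching the schema order. A minimal
// sketch:
//
//   at::Tensor out = at::empty_like(self);
//   at::abs_out(out, self);   // out-first variant
//   at::abs_outf(self, out);  // schema-order variant, same effect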

// aten::absolute(Tensor self) -> Tensor
inline at::Tensor absolute(const at::Tensor & self) {
    return at::_ops::absolute::call(self);
}

// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::absolute_out::call(self, out);
}
// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::absolute_out::call(self, out);
}

// aten::angle(Tensor self) -> Tensor
inline at::Tensor angle(const at::Tensor & self) {
    return at::_ops::angle::call(self);
}

// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::angle_out::call(self, out);
}
// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::angle_out::call(self, out);
}

// aten::view_as_real(Tensor(a) self) -> Tensor(a)
inline at::Tensor view_as_real(const at::Tensor & self) {
    return at::_ops::view_as_real::call(self);
}

// aten::view_as_complex(Tensor(a) self) -> Tensor(a)
inline at::Tensor view_as_complex(const at::Tensor & self) {
    return at::_ops::view_as_complex::call(self);
}

// aten::sgn(Tensor self) -> Tensor
inline at::Tensor sgn(const at::Tensor & self) {
    return at::_ops::sgn::call(self);
}

// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::sgn_out::call(self, out);
}
// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::sgn_out::call(self, out);
}

// aten::real(Tensor(a) self) -> Tensor(a)
inline at::Tensor real(const at::Tensor & self) {
    return at::_ops::real::call(self);
}

// aten::imag(Tensor(a) self) -> Tensor(a)
inline at::Tensor imag(const at::Tensor & self) {
    return at::_ops::imag::call(self);
}

// aten::_conj(Tensor(a) self) -> Tensor(a)
inline at::Tensor _conj(const at::Tensor & self) {
    return at::_ops::_conj::call(self);
}

// aten::conj(Tensor(a) self) -> Tensor(a)
inline at::Tensor conj(const at::Tensor & self) {
    return at::_ops::conj::call(self);
}

// aten::_conj_physical(Tensor self) -> Tensor
inline at::Tensor _conj_physical(const at::Tensor & self) {
    return at::_ops::_conj_physical::call(self);
}

// aten::conj_physical(Tensor self) -> Tensor
inline at::Tensor conj_physical(const at::Tensor & self) {
    return at::_ops::conj_physical::call(self);
}

// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::conj_physical_out::call(self, out);
}
// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::conj_physical_out::call(self, out);
}

// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & conj_physical_(at::Tensor & self) {
    return at::_ops::conj_physical_::call(self);
}

// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
inline at::Tensor resolve_conj(const at::Tensor & self) {
    return at::_ops::resolve_conj::call(self);
}

// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
inline at::Tensor resolve_neg(const at::Tensor & self) {
    return at::_ops::resolve_neg::call(self);
}

// aten::_neg_view(Tensor(a) self) -> Tensor(a)
inline at::Tensor _neg_view(const at::Tensor & self) {
    return at::_ops::_neg_view::call(self);
}

// aten::acos(Tensor self) -> Tensor
inline at::Tensor acos(const at::Tensor & self) {
    return at::_ops::acos::call(self);
}

// aten::acos_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & acos_(at::Tensor & self) {
    return at::_ops::acos_::call(self);
}

// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::acos_out::call(self, out);
}
// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::acos_out::call(self, out);
}

// aten::arccos(Tensor self) -> Tensor
inline at::Tensor arccos(const at::Tensor & self) {
    return at::_ops::arccos::call(self);
}

// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & arccos_(at::Tensor & self) {
    return at::_ops::arccos_::call(self);
}

// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arccos_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::arccos_out::call(self, out);
}
// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arccos_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::arccos_out::call(self, out);
}

// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
inline at::Tensor avg_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true) {
    return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}

// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
inline at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
    return at::_ops::adaptive_avg_pool1d::call(self, output_size);
}

// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size) {
    return at::_ops::adaptive_max_pool1d::call(self, output_size);
}

// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
inline at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
    return at::_ops::add_Tensor::call(self, other, alpha);
}

// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
    return at::_ops::add_out::call(self, other, alpha, out);
}
// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    return at::_ops::add_out::call(self, other, alpha, out);
}

// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
inline at::Tensor _add_relu(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
    return at::_ops::_add_relu_Tensor::call(self, other, alpha);
}

// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
inline at::Tensor & _add_relu_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
    return at::_ops::_add_relu__Tensor::call(self, other, alpha);
}

// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
    return at::_ops::_add_relu_out::call(self, other, alpha, out);
}
// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _add_relu_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
    return at::_ops::_add_relu_out::call(self, other, alpha, out);
}

// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
inline at::Tensor _add_relu(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
    return at::_ops::_add_relu_Scalar::call(self, other, alpha);
}

// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
inline at::Tensor & _add_relu_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
    return at::_ops::_add_relu__Scalar::call(self, other, alpha);
}

// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
inline at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
    return at::_ops::add_Scalar::call(self, other, alpha);
}

// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
inline at::Tensor addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
    return at::_ops::addmv::call(self, mat, vec, beta, alpha);
}

// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
inline at::Tensor & addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
    return at::_ops::addmv_::call(self, mat, vec, beta, alpha);
}

// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
    return at::_ops::addmv_out::call(self, mat, vec, beta, alpha, out);
}
// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    return at::_ops::addmv_out::call(self, mat, vec, beta, alpha, out);
}

// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
inline at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
    return at::_ops::addr::call(self, vec1, vec2, beta, alpha);
}

// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & addr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
    return at::_ops::addr_out::call(self, vec1, vec2, beta, alpha, out);
}
// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & addr_outf(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
    return at::_ops::addr_out::call(self, vec1, vec2, beta, alpha, out);
}

// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
inline at::Tensor affine_grid_generator(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
    return at::_ops::affine_grid_generator::call(theta, size, align_corners);
}

// aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
inline at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
    return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
}

// aten::_is_all_true(Tensor self) -> Tensor
inline at::Tensor _is_all_true(const at::Tensor & self) {
    return at::_ops::_is_all_true::call(self);
}

// aten::_is_any_true(Tensor self) -> Tensor
inline at::Tensor _is_any_true(const at::Tensor & self) {
    return at::_ops::_is_any_true::call(self);
}

// aten::_test_check_tensor(Tensor self) -> Tensor
inline at::Tensor _test_check_tensor(const at::Tensor & self) {
    return at::_ops::_test_check_tensor::call(self);
}

// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
inline at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim=false) {
    return at::_ops::all_dim::call(self, dim, keepdim);
}

// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false) {
    return at::_ops::all_out::call(self, dim, keepdim, out);
}
// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & all_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    return at::_ops::all_out::call(self, dim, keepdim, out);
}

// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
inline at::Tensor all(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
    return at::_ops::all_dimname::call(self, dim, keepdim);
}

// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
    return at::_ops::all_dimname_out::call(self, dim, keepdim, out);
}
// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & all_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    return at::_ops::all_dimname_out::call(self, dim, keepdim, out);
}

// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
inline bool allclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) {
    return at::_ops::allclose::call(self, other, rtol, atol, equal_nan);
}
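
// `allclose` reduces to a single bool using the elementwise test
// |self - other| <= atol + rtol * |other| (with `equal_nan` controlling
// whether NaNs compare equal). A minimal sketch:
//
//   at::Tensor a = at::tensor({1.0, 2.0});
//   at::Tensor b = at::tensor({1.0, 2.0 + 1e-9});
//   bool ok = at::allclose(a, b);  // true under the default tolerances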

// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
inline at::Tensor any(const at::Tensor & self, int64_t dim, bool keepdim=false) {
    return at::_ops::any_dim::call(self, dim, keepdim);
}

// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false) {
    return at::_ops::any_out::call(self, dim, keepdim, out);
}
// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & any_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
    return at::_ops::any_out::call(self, dim, keepdim, out);
}

// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
inline at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
    return at::_ops::any_dimname::call(self, dim, keepdim);
}

// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
    return at::_ops::any_dimname_out::call(self, dim, keepdim, out);
}
// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & any_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
    return at::_ops::any_dimname_out::call(self, dim, keepdim, out);
}

// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options={}) {
    return at::_ops::arange::call(end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor arange(const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::arange::call(end, dtype, layout, device, pin_memory);
}

// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}) {
    return at::_ops::arange_start::call(start, end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::arange_start::call(start, end, dtype, layout, device, pin_memory);
}

// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options={}) {
    return at::_ops::arange_start_step::call(start, end, step, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
}
// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
    return at::_ops::arange_start_step::call(start, end, step, dtype, layout, device, pin_memory);
}
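
// Factory functions such as `arange` are generated in two forms: one
// taking a packed `at::TensorOptions` (defaulted to `{}` above), and one
// taking the unpacked dtype/layout/device/pin_memory optionals that the
// dispatcher actually receives. A short usage sketch of the packed form
// (values are illustrative):
//
//   at::Tensor t = at::arange(10, at::TensorOptions().dtype(at::kFloat));
//   at::Tensor u = at::arange(2.0, 5.0, 0.5);  // 2.0, 2.5, ..., 4.5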

// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arange_out(at::Tensor & out, const at::Scalar & end) {
    return at::_ops::arange_out::call(end, out);
}
// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arange_outf(const at::Scalar & end, at::Tensor & out) {
    return at::_ops::arange_out::call(end, out);
}

// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arange_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
    return at::_ops::arange_start_out::call(start, end, step, out);
}
// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arange_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
    return at::_ops::arange_start_out::call(start, end, step, out);
}

// aten::_dim_arange(Tensor like, int dim) -> Tensor
inline at::Tensor _dim_arange(const at::Tensor & like, int64_t dim) {
    return at::_ops::_dim_arange::call(like, dim);
}

// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
inline at::Tensor argmax(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
    return at::_ops::argmax::call(self, dim, keepdim);
}

// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & argmax_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
    return at::_ops::argmax_out::call(self, dim, keepdim, out);
}
// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & argmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    return at::_ops::argmax_out::call(self, dim, keepdim, out);
}

// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
inline at::Tensor argmin(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
    return at::_ops::argmin::call(self, dim, keepdim);
}

// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & argmin_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
    return at::_ops::argmin_out::call(self, dim, keepdim, out);
}
// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & argmin_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
    return at::_ops::argmin_out::call(self, dim, keepdim, out);
}

// aten::acosh(Tensor self) -> Tensor
inline at::Tensor acosh(const at::Tensor & self) {
    return at::_ops::acosh::call(self);
}

// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & acosh_(at::Tensor & self) {
    return at::_ops::acosh_::call(self);
}

// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & acosh_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::acosh_out::call(self, out);
}
// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & acosh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::acosh_out::call(self, out);
}

// aten::arccosh(Tensor self) -> Tensor
inline at::Tensor arccosh(const at::Tensor & self) {
    return at::_ops::arccosh::call(self);
}

// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & arccosh_(at::Tensor & self) {
    return at::_ops::arccosh_::call(self);
}

// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arccosh_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::arccosh_out::call(self, out);
}
// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arccosh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::arccosh_out::call(self, out);
}

// aten::asinh(Tensor self) -> Tensor
inline at::Tensor asinh(const at::Tensor & self) {
    return at::_ops::asinh::call(self);
}

// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & asinh_(at::Tensor & self) {
    return at::_ops::asinh_::call(self);
}

// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::asinh_out::call(self, out);
}
// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::asinh_out::call(self, out);
}

// aten::arcsinh(Tensor self) -> Tensor
inline at::Tensor arcsinh(const at::Tensor & self) {
    return at::_ops::arcsinh::call(self);
}

// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & arcsinh_(at::Tensor & self) {
    return at::_ops::arcsinh_::call(self);
}

// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arcsinh_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::arcsinh_out::call(self, out);
}
// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arcsinh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::arcsinh_out::call(self, out);
}

// aten::atanh(Tensor self) -> Tensor
inline at::Tensor atanh(const at::Tensor & self) {
    return at::_ops::atanh::call(self);
}

// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & atanh_(at::Tensor & self) {
    return at::_ops::atanh_::call(self);
}

// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::atanh_out::call(self, out);
}
// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::atanh_out::call(self, out);
}

// aten::arctanh(Tensor self) -> Tensor
inline at::Tensor arctanh(const at::Tensor & self) {
    return at::_ops::arctanh::call(self);
}

// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & arctanh_(at::Tensor & self) {
    return at::_ops::arctanh_::call(self);
}

// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arctanh_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::arctanh_out::call(self, out);
}
// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arctanh_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::arctanh_out::call(self, out);
}

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
inline at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
    return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
    return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
  }
}

// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
inline at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
    return at::_ops::as_strided::call(self, size, stride, storage_offset);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
    return at::_ops::as_strided::call(self, size, stride, storage_offset);
  }
}
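
// `as_strided` creates a view over the same storage with an explicit
// size, stride, and optional storage offset; the symint overloads mirror
// it for symbolic shapes. A minimal sketch (assumes a contiguous
// 4-element base tensor):
//
//   at::Tensor base = at::arange(4);
//   // view the 4 elements as a 2x2 matrix: row stride 2, column stride 1
//   at::Tensor v = at::as_strided(base, {2, 2}, {2, 1});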

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
inline const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
    return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
    return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
  }
}

// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
inline const at::Tensor & as_strided__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
    return at::_ops::as_strided_::call(self, size, stride, storage_offset);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  const at::Tensor & as_strided_(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
    return at::_ops::as_strided_::call(self, size, stride, storage_offset);
  }
}

// aten::asin(Tensor self) -> Tensor
inline at::Tensor asin(const at::Tensor & self) {
    return at::_ops::asin::call(self);
}

// aten::asin_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & asin_(at::Tensor & self) {
    return at::_ops::asin_::call(self);
}

// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::asin_out::call(self, out);
}
// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::asin_out::call(self, out);
}

// aten::arcsin(Tensor self) -> Tensor
inline at::Tensor arcsin(const at::Tensor & self) {
    return at::_ops::arcsin::call(self);
}

// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & arcsin_(at::Tensor & self) {
    return at::_ops::arcsin_::call(self);
}

// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arcsin_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::arcsin_out::call(self, out);
}
// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & arcsin_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::arcsin_out::call(self, out);
}

// aten::atan(Tensor self) -> Tensor
inline at::Tensor atan(const at::Tensor & self) {
    return at::_ops::atan::call(self);
}

// aten::atan_(Tensor(a!) self) -> Tensor(a!)
inline at::Tensor & atan_(at::Tensor & self) {
    return at::_ops::atan_::call(self);
}

// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self) {
    return at::_ops::atan_out::call(self, out);
}
// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out) {
    return at::_ops::atan_out::call(self, out);
}

// aten::arctan(Tensor self) -> Tensor
inline at::Tensor arctan(const at::Tensor & self) {
    return at::_ops::arctan::call(self);
}

1000// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
1001inline at::Tensor & arctan_(at::Tensor & self) {
1002 return at::_ops::arctan_::call(self);
1003}
1004
1005// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1006inline at::Tensor & arctan_out(at::Tensor & out, const at::Tensor & self) {
1007 return at::_ops::arctan_out::call(self, out);
1008}
1009// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1010inline at::Tensor & arctan_outf(const at::Tensor & self, at::Tensor & out) {
1011 return at::_ops::arctan_out::call(self, out);
1012}
1013
1014// aten::atleast_1d(Tensor self) -> Tensor
1015inline at::Tensor atleast_1d(const at::Tensor & self) {
1016 return at::_ops::atleast_1d::call(self);
1017}
1018
1019// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
1020inline ::std::vector<at::Tensor> atleast_1d(at::TensorList tensors) {
1021 return at::_ops::atleast_1d_Sequence::call(tensors);
1022}
1023
1024// aten::atleast_2d(Tensor self) -> Tensor
1025inline at::Tensor atleast_2d(const at::Tensor & self) {
1026 return at::_ops::atleast_2d::call(self);
1027}
1028
1029// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
1030inline ::std::vector<at::Tensor> atleast_2d(at::TensorList tensors) {
1031 return at::_ops::atleast_2d_Sequence::call(tensors);
1032}
1033
1034// aten::atleast_3d(Tensor self) -> Tensor
1035inline at::Tensor atleast_3d(const at::Tensor & self) {
1036 return at::_ops::atleast_3d::call(self);
1037}
1038
1039// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
1040inline ::std::vector<at::Tensor> atleast_3d(at::TensorList tensors) {
1041 return at::_ops::atleast_3d_Sequence::call(tensors);
1042}
1043
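// Editorial note: an illustrative sketch, not part of the generated header.
// The `atleast_*` family promotes inputs to a minimum dimensionality:
//
//   at::Tensor s = at::scalar_tensor(3.14); // 0-d
//   at::Tensor v = at::atleast_1d(s);       // shape [1]
//   at::Tensor m = at::atleast_2d(s);       // shape [1, 1]
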
1044// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
1045inline at::Tensor baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
1046 return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha);
1047}
1048
1049// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
1050inline at::Tensor & baddbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
1051 return at::_ops::baddbmm_out::call(self, batch1, batch2, beta, alpha, out);
1052}
1053// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
1054inline at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
1055 return at::_ops::baddbmm_out::call(self, batch1, batch2, beta, alpha, out);
1056}
1057
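// Editorial note: an illustrative sketch, not part of the generated header.
// `baddbmm` computes `beta * self + alpha * (batch1 @ batch2)` batchwise:
//
//   at::Tensor bias = at::zeros({10, 3, 5});
//   at::Tensor b1 = at::randn({10, 3, 4});
//   at::Tensor b2 = at::randn({10, 4, 5});
//   at::Tensor y = at::baddbmm(bias, b1, b2); // shape [10, 3, 5]
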
1058// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1059inline at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options={}) {
1060 return at::_ops::bartlett_window::call(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
1061}
1062// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1063inline at::Tensor bartlett_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1064 return at::_ops::bartlett_window::call(window_length, dtype, layout, device, pin_memory);
1065}
1066
1067// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1068inline at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
1069 return at::_ops::bartlett_window_periodic::call(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
1070}
1071// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1072inline at::Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1073 return at::_ops::bartlett_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
1074}
1075
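// Editorial note: factory functions such as `bartlett_window` come in two
// spellings, one bundling dtype/layout/device/pin_memory into
// `at::TensorOptions` and one taking the four optionals unpacked. A minimal
// sketch, not part of the generated header:
//
//   at::Tensor w = at::bartlett_window(128, at::TensorOptions().dtype(at::kFloat));
//   at::Tensor wp = at::bartlett_window(128, /*periodic=*/true, at::kDouble,
//                                       c10::nullopt, c10::nullopt, c10::nullopt);
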
1076// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
1077inline at::Tensor batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
1078 return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
1079}
1080
1081// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
1082inline at::Tensor quantized_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
1083 return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
1084}
1085
1086// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
1087inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
1088 return at::_ops::_batch_norm_impl_index::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
1089}
1090
1091// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
1092inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
1093 return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
1094}
1095
1096// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
1097inline at::Tensor bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
1098 return at::_ops::bernoulli::call(self, generator);
1099}
1100
1101// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
1102inline at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
1103 return at::_ops::bernoulli_out::call(self, generator, out);
1104}
1105// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
1106inline at::Tensor & bernoulli_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
1107 return at::_ops::bernoulli_out::call(self, generator, out);
1108}
1109
1110// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
1111inline at::Tensor bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
1112 return at::_ops::bernoulli_p::call(self, p, generator);
1113}
1114
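// Editorial note: an illustrative sketch, not part of the generated header.
// `bernoulli` draws 0/1 samples, either from per-element probabilities in
// `self` or from a single probability `p`:
//
//   at::Tensor probs = at::full({4}, 0.5);
//   at::Tensor a = at::bernoulli(probs);            // uses the values of `probs`
//   at::Tensor b = at::bernoulli(probs, /*p=*/0.9); // ignores them, uses p
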
1115// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor
1116inline at::Tensor bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
1117 return at::_ops::bilinear::call(input1, input2, weight, bias);
1118}
1119
1120// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
1121inline at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
1122 return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
1123}
1124
1125// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
1126inline at::Tensor & binary_cross_entropy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
1127 return at::_ops::binary_cross_entropy_out::call(self, target, weight, reduction, out);
1128}
1129// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
1130inline at::Tensor & binary_cross_entropy_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
1131 return at::_ops::binary_cross_entropy_out::call(self, target, weight, reduction, out);
1132}
1133
1134// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
1135inline at::Tensor binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
1136 return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction);
1137}
1138
1139// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
1140inline at::Tensor & binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
1141 return at::_ops::binary_cross_entropy_backward_grad_input::call(grad_output, self, target, weight, reduction, grad_input);
1142}
1143// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
1144inline at::Tensor & binary_cross_entropy_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
1145 return at::_ops::binary_cross_entropy_backward_grad_input::call(grad_output, self, target, weight, reduction, grad_input);
1146}
1147
1148// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
1149inline at::Tensor binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) {
1150 return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
1151}
1152
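// Editorial note: an illustrative sketch, not part of the generated header.
// `binary_cross_entropy` expects probabilities in [0, 1]; for raw scores the
// fused `binary_cross_entropy_with_logits` is the numerically safer choice:
//
//   at::Tensor logits = at::randn({8});
//   at::Tensor target = at::randint(0, 2, {8}).to(at::kFloat);
//   at::Tensor l1 = at::binary_cross_entropy(at::sigmoid(logits), target);
//   at::Tensor l2 = at::binary_cross_entropy_with_logits(logits, target);
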
1153// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
1154inline at::Tensor bincount(const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0) {
1155 return at::_ops::bincount::call(self, weights, minlength);
1156}
1157
1158// aten::bitwise_not(Tensor self) -> Tensor
1159inline at::Tensor bitwise_not(const at::Tensor & self) {
1160 return at::_ops::bitwise_not::call(self);
1161}
1162
1163// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1164inline at::Tensor & bitwise_not_out(at::Tensor & out, const at::Tensor & self) {
1165 return at::_ops::bitwise_not_out::call(self, out);
1166}
1167// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1168inline at::Tensor & bitwise_not_outf(const at::Tensor & self, at::Tensor & out) {
1169 return at::_ops::bitwise_not_out::call(self, out);
1170}
1171
1172// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1173inline at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1174 return at::_ops::copysign_out::call(self, other, out);
1175}
1176// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1177inline at::Tensor & copysign_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1178 return at::_ops::copysign_out::call(self, other, out);
1179}
1180
1181// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
1182inline at::Tensor copysign(const at::Tensor & self, const at::Tensor & other) {
1183 return at::_ops::copysign_Tensor::call(self, other);
1184}
1185
1186// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
1187inline at::Tensor copysign(const at::Tensor & self, const at::Scalar & other) {
1188 return at::_ops::copysign_Scalar::call(self, other);
1189}
1190
1191// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
1192inline at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
1193 return at::_ops::copysign_Scalar_out::call(self, other, out);
1194}
1195// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
1196inline at::Tensor & copysign_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
1197 return at::_ops::copysign_Scalar_out::call(self, other, out);
1198}
1199
1200// aten::logical_not(Tensor self) -> Tensor
1201inline at::Tensor logical_not(const at::Tensor & self) {
1202 return at::_ops::logical_not::call(self);
1203}
1204
1205// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1206inline at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self) {
1207 return at::_ops::logical_not_out::call(self, out);
1208}
1209// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1210inline at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out) {
1211 return at::_ops::logical_not_out::call(self, out);
1212}
1213
1214// aten::logical_xor(Tensor self, Tensor other) -> Tensor
1215inline at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other) {
1216 return at::_ops::logical_xor::call(self, other);
1217}
1218
1219// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1220inline at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1221 return at::_ops::logical_xor_out::call(self, other, out);
1222}
1223// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1224inline at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1225 return at::_ops::logical_xor_out::call(self, other, out);
1226}
1227
1228// aten::logical_and(Tensor self, Tensor other) -> Tensor
1229inline at::Tensor logical_and(const at::Tensor & self, const at::Tensor & other) {
1230 return at::_ops::logical_and::call(self, other);
1231}
1232
1233// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1234inline at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1235 return at::_ops::logical_and_out::call(self, other, out);
1236}
1237// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1238inline at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1239 return at::_ops::logical_and_out::call(self, other, out);
1240}
1241
1242// aten::logical_or(Tensor self, Tensor other) -> Tensor
1243inline at::Tensor logical_or(const at::Tensor & self, const at::Tensor & other) {
1244 return at::_ops::logical_or::call(self, other);
1245}
1246
1247// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1248inline at::Tensor & logical_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
1249 return at::_ops::logical_or_out::call(self, other, out);
1250}
1251// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1252inline at::Tensor & logical_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
1253 return at::_ops::logical_or_out::call(self, other, out);
1254}
1255
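// Editorial note: an illustrative sketch, not part of the generated header.
// The elementwise logical family returns (or writes into) bool tensors:
//
//   at::Tensor a = at::tensor({1, 0, 1}, at::kBool);
//   at::Tensor b = at::tensor({1, 1, 0}, at::kBool);
//   at::Tensor x = at::logical_xor(a, b); // [false, true, true]
//   at::Tensor n = at::logical_not(a);    // [false, true, false]
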
1256// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1257inline at::Tensor blackman_window(int64_t window_length, at::TensorOptions options={}) {
1258 return at::_ops::blackman_window::call(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
1259}
1260// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1261inline at::Tensor blackman_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1262 return at::_ops::blackman_window::call(window_length, dtype, layout, device, pin_memory);
1263}
1264
1265// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1266inline at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
1267 return at::_ops::blackman_window_periodic::call(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
1268}
1269// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
1270inline at::Tensor blackman_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
1271 return at::_ops::blackman_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
1272}
1273
1274// aten::bmm(Tensor self, Tensor mat2) -> Tensor
1275inline at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
1276 return at::_ops::bmm::call(self, mat2);
1277}
1278
1279// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
1280inline at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
1281 return at::_ops::bmm_out::call(self, mat2, out);
1282}
1283// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
1284inline at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
1285 return at::_ops::bmm_out::call(self, mat2, out);
1286}
1287
1288// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
1289inline ::std::vector<at::Tensor> broadcast_tensors(at::TensorList tensors) {
1290 return at::_ops::broadcast_tensors::call(tensors);
1291}
1292
1293// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
1294inline at::Tensor broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
1295 return at::_ops::broadcast_to::call(self, c10::fromIntArrayRefSlow(size));
1296}
1297namespace symint {
1298 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1299 at::Tensor broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
1300 return at::_ops::broadcast_to::call(self, c10::fromIntArrayRefSlow(size));
1301 }
1302}
1303
1304// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
1305inline at::Tensor broadcast_to_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
1306 return at::_ops::broadcast_to::call(self, size);
1307}
1308namespace symint {
1309 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1310 at::Tensor broadcast_to(const at::Tensor & self, c10::SymIntArrayRef size) {
1311 return at::_ops::broadcast_to::call(self, size);
1312 }
1313}
1314
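// Editorial note: an illustrative sketch, not part of the generated header.
// `broadcast_to` returns a zero-copy view expanded to the requested size:
//
//   at::Tensor row = at::arange(3);                   // shape [3]
//   at::Tensor tiled = at::broadcast_to(row, {4, 3}); // view, shape [4, 3]
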
1315// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
1316inline at::Tensor _sparse_broadcast_to(const at::Tensor & self, at::IntArrayRef size) {
1317 return at::_ops::_sparse_broadcast_to::call(self, size);
1318}
1319
1320// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
1321inline at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim=0) {
1322 return at::_ops::cat::call(tensors, dim);
1323}
1324
1325// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
1326inline at::Tensor & cat_out(at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim=0) {
1327 return at::_ops::cat_out::call(tensors, dim, out);
1328}
1329// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
1330inline at::Tensor & cat_outf(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
1331 return at::_ops::cat_out::call(tensors, dim, out);
1332}
1333
1334// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
1335inline at::Tensor cat(at::TensorList tensors, at::Dimname dim) {
1336 return at::_ops::cat_names::call(tensors, dim);
1337}
1338
1339// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
1340inline at::Tensor & cat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
1341 return at::_ops::cat_names_out::call(tensors, dim, out);
1342}
1343// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
1344inline at::Tensor & cat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
1345 return at::_ops::cat_names_out::call(tensors, dim, out);
1346}
1347
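// Editorial note: an illustrative sketch, not part of the generated header.
// `cat` joins tensors along an existing dimension; the `concat` and
// `concatenate` entries below are aliases with identical semantics:
//
//   at::Tensor a = at::ones({2, 3});
//   at::Tensor b = at::zeros({2, 3});
//   at::Tensor c = at::cat({a, b}, /*dim=*/0); // shape [4, 3]
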
1348// aten::concat(Tensor[] tensors, int dim=0) -> Tensor
1349inline at::Tensor concat(at::TensorList tensors, int64_t dim=0) {
1350 return at::_ops::concat::call(tensors, dim);
1351}
1352
1353// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
1354inline at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0) {
1355 return at::_ops::concat_out::call(tensors, dim, out);
1356}
1357// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
1358inline at::Tensor & concat_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
1359 return at::_ops::concat_out::call(tensors, dim, out);
1360}
1361
1362// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor
1363inline at::Tensor concat(at::TensorList tensors, at::Dimname dim) {
1364 return at::_ops::concat_names::call(tensors, dim);
1365}
1366
1367// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
1368inline at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
1369 return at::_ops::concat_names_out::call(tensors, dim, out);
1370}
1371// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
1372inline at::Tensor & concat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
1373 return at::_ops::concat_names_out::call(tensors, dim, out);
1374}
1375
1376// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor
1377inline at::Tensor concatenate(at::TensorList tensors, int64_t dim=0) {
1378 return at::_ops::concatenate::call(tensors, dim);
1379}
1380
1381// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
1382inline at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0) {
1383 return at::_ops::concatenate_out::call(tensors, dim, out);
1384}
1385// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
1386inline at::Tensor & concatenate_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
1387 return at::_ops::concatenate_out::call(tensors, dim, out);
1388}
1389
1390// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor
1391inline at::Tensor concatenate(at::TensorList tensors, at::Dimname dim) {
1392 return at::_ops::concatenate_names::call(tensors, dim);
1393}
1394
1395// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
1396inline at::Tensor & concatenate_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
1397 return at::_ops::concatenate_names_out::call(tensors, dim, out);
1398}
1399// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
1400inline at::Tensor & concatenate_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
1401 return at::_ops::concatenate_names_out::call(tensors, dim, out);
1402}
1403
1404// aten::block_diag(Tensor[] tensors) -> Tensor
1405inline at::Tensor block_diag(at::TensorList tensors) {
1406 return at::_ops::block_diag::call(tensors);
1407}
1408
1409// aten::ceil(Tensor self) -> Tensor
1410inline at::Tensor ceil(const at::Tensor & self) {
1411 return at::_ops::ceil::call(self);
1412}
1413
1414// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
1415inline at::Tensor & ceil_(at::Tensor & self) {
1416 return at::_ops::ceil_::call(self);
1417}
1418
1419// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1420inline at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self) {
1421 return at::_ops::ceil_out::call(self, out);
1422}
1423// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1424inline at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out) {
1425 return at::_ops::ceil_out::call(self, out);
1426}
1427
1428// aten::chain_matmul(Tensor[] matrices) -> Tensor
1429inline at::Tensor chain_matmul(at::TensorList matrices) {
1430 return at::_ops::chain_matmul::call(matrices);
1431}
1432
1433// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
1434inline at::Tensor & chain_matmul_out(at::Tensor & out, at::TensorList matrices) {
1435 return at::_ops::chain_matmul_out::call(matrices, out);
1436}
1437// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
1438inline at::Tensor & chain_matmul_outf(at::TensorList matrices, at::Tensor & out) {
1439 return at::_ops::chain_matmul_out::call(matrices, out);
1440}
1441
1442// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
1443inline ::std::vector<at::Tensor> unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0) {
1444 return at::_ops::unsafe_chunk::call(self, chunks, dim);
1445}
1446
1447// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
1448inline ::std::vector<at::Tensor> chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0) {
1449 return at::_ops::chunk::call(self, chunks, dim);
1450}
1451
1452// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
1453inline ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, int64_t sections, int64_t dim=0) {
1454 return at::_ops::tensor_split_sections::call(self, sections, dim);
1455}
1456namespace symint {
1457 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1458 ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, int64_t sections, int64_t dim=0) {
1459 return at::_ops::tensor_split_sections::call(self, sections, dim);
1460 }
1461}
1462
1463// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
1464inline ::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymInt sections, int64_t dim=0) {
1465 return at::_ops::tensor_split_sections::call(self, sections, dim);
1466}
1467namespace symint {
1468 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1469 ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, c10::SymInt sections, int64_t dim=0) {
1470 return at::_ops::tensor_split_sections::call(self, sections, dim);
1471 }
1472}
1473
1474// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
1475inline ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) {
1476 return at::_ops::tensor_split_indices::call(self, c10::fromIntArrayRefSlow(indices), dim);
1477}
1478namespace symint {
1479 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1480 ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) {
1481 return at::_ops::tensor_split_indices::call(self, c10::fromIntArrayRefSlow(indices), dim);
1482 }
1483}
1484
1485// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
1486inline ::std::vector<at::Tensor> tensor_split_symint(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0) {
1487 return at::_ops::tensor_split_indices::call(self, indices, dim);
1488}
1489namespace symint {
1490 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1491 ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0) {
1492 return at::_ops::tensor_split_indices::call(self, indices, dim);
1493 }
1494}
1495
1496// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
1497inline ::std::vector<at::Tensor> tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim=0) {
1498 return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
1499}
1500
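// Editorial note: an illustrative sketch, not part of the generated header.
// `tensor_split` always returns views; with `sections` it splits as evenly
// as possible, with `indices` it cuts at the given positions:
//
//   at::Tensor t = at::arange(10);
//   auto thirds = at::tensor_split(t, 3);      // sizes 4, 3, 3
//   auto pieces = at::tensor_split(t, {2, 7}); // [0,2), [2,7), [7,10)
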
1501// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
1502inline at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
1503 return at::_ops::clamp::call(self, min, max);
1504}
1505
1506// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
1507inline at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
1508 return at::_ops::clamp_Tensor::call(self, min, max);
1509}
1510
1511// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
1512inline at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
1513 return at::_ops::clamp_::call(self, min, max);
1514}
1515
1516// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
1517inline at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
1518 return at::_ops::clamp__Tensor::call(self, min, max);
1519}
1520
1521// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
1522inline at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
1523 return at::_ops::clamp_out::call(self, min, max, out);
1524}
1525// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
1526inline at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
1527 return at::_ops::clamp_out::call(self, min, max, out);
1528}
1529
1530// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
1531inline at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
1532 return at::_ops::clamp_Tensor_out::call(self, min, max, out);
1533}
1534// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
1535inline at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
1536 return at::_ops::clamp_Tensor_out::call(self, min, max, out);
1537}
1538
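// Editorial note: an illustrative sketch, not part of the generated header.
// `clamp` limits values elementwise; either bound may be omitted, and the
// Tensor overloads allow per-element bounds:
//
//   at::Tensor t = at::randn({5});
//   at::Tensor c1 = at::clamp(t, -1.0, 1.0);         // into [-1, 1]
//   at::Tensor c2 = at::clamp(t, c10::nullopt, 0.5); // upper bound only
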
1539// aten::clamp_max(Tensor self, Scalar max) -> Tensor
1540inline at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max) {
1541 return at::_ops::clamp_max::call(self, max);
1542}
1543
1544// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
1545inline at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max) {
1546 return at::_ops::clamp_max_Tensor::call(self, max);
1547}
1548
1549// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
1550inline at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max) {
1551 return at::_ops::clamp_max_::call(self, max);
1552}
1553
1554// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
1555inline at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max) {
1556 return at::_ops::clamp_max__Tensor::call(self, max);
1557}
1558
1559// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
1560inline at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & max) {
1561 return at::_ops::clamp_max_out::call(self, max, out);
1562}
1563// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
1564inline at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
1565 return at::_ops::clamp_max_out::call(self, max, out);
1566}
1567
1568// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
1569inline at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & max) {
1570 return at::_ops::clamp_max_Tensor_out::call(self, max, out);
1571}
1572// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
1573inline at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
1574 return at::_ops::clamp_max_Tensor_out::call(self, max, out);
1575}
1576
1577// aten::clamp_min(Tensor self, Scalar min) -> Tensor
1578inline at::Tensor clamp_min(const at::Tensor & self, const at::Scalar & min) {
1579 return at::_ops::clamp_min::call(self, min);
1580}
1581
1582// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
1583inline at::Tensor clamp_min(const at::Tensor & self, const at::Tensor & min) {
1584 return at::_ops::clamp_min_Tensor::call(self, min);
1585}
1586
1587// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
1588inline at::Tensor & clamp_min_(at::Tensor & self, const at::Scalar & min) {
1589 return at::_ops::clamp_min_::call(self, min);
1590}
1591
1592// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
1593inline at::Tensor & clamp_min_(at::Tensor & self, const at::Tensor & min) {
1594 return at::_ops::clamp_min__Tensor::call(self, min);
1595}
1596
1597// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
1598inline at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min) {
1599 return at::_ops::clamp_min_out::call(self, min, out);
1600}
1601// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
1602inline at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
1603 return at::_ops::clamp_min_out::call(self, min, out);
1604}
1605
1606// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
1607inline at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & min) {
1608 return at::_ops::clamp_min_Tensor_out::call(self, min, out);
1609}
1610// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
1611inline at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
1612 return at::_ops::clamp_min_Tensor_out::call(self, min, out);
1613}
1614
1615// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
1616inline at::Tensor clip(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
1617 return at::_ops::clip::call(self, min, max);
1618}
1619
1620// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
1621inline at::Tensor clip(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
1622 return at::_ops::clip_Tensor::call(self, min, max);
1623}
1624
1625// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
1626inline at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
1627 return at::_ops::clip_::call(self, min, max);
1628}
1629
1630// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
1631inline at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
1632 return at::_ops::clip__Tensor::call(self, min, max);
1633}
1634
1635// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
1636inline at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
1637 return at::_ops::clip_out::call(self, min, max, out);
1638}
1639// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
1640inline at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
1641 return at::_ops::clip_out::call(self, min, max, out);
1642}
1643
1644// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
1645inline at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
1646 return at::_ops::clip_Tensor_out::call(self, min, max, out);
1647}
1648// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
1649inline at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
1650 return at::_ops::clip_Tensor_out::call(self, min, max, out);
1651}
1652
1653// aten::cudnn_is_acceptable(Tensor self) -> bool
1654inline bool cudnn_is_acceptable(const at::Tensor & self) {
1655 return at::_ops::cudnn_is_acceptable::call(self);
1656}
1657
1658// aten::complex(Tensor real, Tensor imag) -> Tensor
1659inline at::Tensor complex(const at::Tensor & real, const at::Tensor & imag) {
1660 return at::_ops::complex::call(real, imag);
1661}
1662
1663// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
1664inline at::Tensor & complex_out(at::Tensor & out, const at::Tensor & real, const at::Tensor & imag) {
1665 return at::_ops::complex_out::call(real, imag, out);
1666}
1667// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)
1668inline at::Tensor & complex_outf(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
1669 return at::_ops::complex_out::call(real, imag, out);
1670}
1671
1672// aten::polar(Tensor abs, Tensor angle) -> Tensor
1673inline at::Tensor polar(const at::Tensor & abs, const at::Tensor & angle) {
1674 return at::_ops::polar::call(abs, angle);
1675}
1676
1677// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
1678inline at::Tensor & polar_out(at::Tensor & out, const at::Tensor & abs, const at::Tensor & angle) {
1679 return at::_ops::polar_out::call(abs, angle, out);
1680}
1681// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)
1682inline at::Tensor & polar_outf(const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
1683 return at::_ops::polar_out::call(abs, angle, out);
1684}
1685
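// Editorial note: an illustrative sketch, not part of the generated header.
// `complex` assembles a complex tensor from real and imaginary parts, while
// `polar` assembles one from magnitude and phase:
//
//   at::Tensor re = at::ones({2});
//   at::Tensor im = at::zeros({2});
//   at::Tensor z1 = at::complex(re, im); // 1 + 0i, dtype kComplexFloat
//   at::Tensor z2 = at::polar(re, im);   // abs = 1, angle = 0 -> 1 + 0i
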
1686// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
1687inline at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
1688 return at::_ops::constant_pad_nd::call(self, c10::fromIntArrayRefSlow(pad), value);
1689}
1690namespace symint {
1691 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1692 at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
1693 return at::_ops::constant_pad_nd::call(self, c10::fromIntArrayRefSlow(pad), value);
1694 }
1695}
1696
1697// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
1698inline at::Tensor constant_pad_nd_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
1699 return at::_ops::constant_pad_nd::call(self, pad, value);
1700}
1701namespace symint {
1702 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1703 at::Tensor constant_pad_nd(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
1704 return at::_ops::constant_pad_nd::call(self, pad, value);
1705 }
1706}
1707
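// Editorial note: an illustrative sketch, not part of the generated header.
// `constant_pad_nd` pads from the last dimension backwards, with one
// (before, after) pair per padded dimension:
//
//   at::Tensor x = at::ones({2, 2});
//   at::Tensor p = at::constant_pad_nd(x, {1, 1});       // shape [2, 4]
//   at::Tensor q = at::constant_pad_nd(x, {0, 0, 1, 1}); // shape [4, 2]
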
1708// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
1709inline at::Tensor convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
1710 return at::_ops::convolution::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups);
1711}
1712namespace symint {
1713 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1714 at::Tensor convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
1715 return at::_ops::convolution::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups);
1716 }
1717}
1718
1719// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
1720inline at::Tensor convolution_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
1721 return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
1722}
1723namespace symint {
1724 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1725 at::Tensor convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
1726 return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
1727 }
1728}
1729
1730// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
1731inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1732 return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
1733}
1734namespace symint {
1735 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1736 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1737 return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
1738 }
1739}
1740
1741// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
1742inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_symint(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1743 return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
1744}
1745namespace symint {
1746 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1747 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1748 return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
1749 }
1750}
1751
1752// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
1753inline at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
1754 return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
1755}
1756
1757// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
1758inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1759 return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
1760}
1761
1762// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
1763inline at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
1764 return at::_ops::_convolution::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
1765}
1766namespace symint {
1767 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1768 at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
1769 return at::_ops::_convolution::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
1770 }
1771}
1772
1773// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor
1774inline at::Tensor _convolution_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
1775 return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
1776}
1777namespace symint {
1778 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1779 at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
1780 return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
1781 }
1782}
1783
1784// aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
1785inline at::Tensor _convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
1786 return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
1787}
1788
1789// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor
1790inline at::Tensor _convolution_mode(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
1791 return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
1792}
1793
1794// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
1795inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1796 return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
1797}
1798namespace symint {
1799 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
1800 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1801 return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
1802 }
1803}
1804
1805// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
1806inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_symint(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1807 return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
1808}
1809namespace symint {
1810 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
1811 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
1812 return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
1813 }
1814}
1815
1816// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor
1817inline at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
1818 return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
1819}
1820
1821// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor
1822inline at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
1823 return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups);
1824}
1825
1826// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor
1827inline at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
1828 return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
1829}
1830
1831// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor
1832inline at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
1833 return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
1834}
1835
1836// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor
1837inline at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
1838 return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups);
1839}
1840
1841// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
1842inline at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
1843 return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
1844}
1845
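// [Editorial sketch, not generated code] The string-padding overloads accept
// "valid" (no padding) or "same" (output spatial size equals input; requires
// stride 1). Helper name is an illustrative assumption.
inline at::Tensor example_conv2d_same_padding(const at::Tensor & input, const at::Tensor & weight) {
  return at::conv2d(input, weight, /*bias=*/{}, /*stride=*/1, /*padding=*/"same");
}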
1846// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
1847inline at::Tensor conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) {
1848 return at::_ops::conv_tbc::call(self, weight, bias, pad);
1849}
1850
1851// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
1852inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
1853 return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
1854}
1855
1856// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
1857inline at::Tensor conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) {
1858 return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
1859}
1860
1861// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor
1862inline at::Tensor conv_transpose2d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) {
1863 return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
1864}
1865
1866// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
1867inline at::Tensor conv_transpose3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) {
1868 return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
1869}
1870
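// [Editorial sketch, not generated code] Transposed convolution upsamples:
// with stride s the output is roughly s times larger, and output_padding
// disambiguates the exact size when s > 1. Note the weight layout is
// (C_in, C_out/groups, kH, kW), the reverse of regular convolution.
inline at::Tensor example_conv_transpose2d_usage(const at::Tensor & input, const at::Tensor & weight) {
  return at::conv_transpose2d(input, weight, /*bias=*/{}, /*stride=*/2, /*padding=*/1, /*output_padding=*/1);
}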
1871// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
1872inline at::Tensor copy(const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
1873 return at::_ops::copy::call(self, src, non_blocking);
1874}
1875
1876// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
1877inline at::Tensor _copy_from(const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) {
1878 return at::_ops::_copy_from::call(self, dst, non_blocking);
1879}
1880
1881// aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
1882inline at::Tensor _copy_from_and_resize(const at::Tensor & self, const at::Tensor & dst) {
1883 return at::_ops::_copy_from_and_resize::call(self, dst);
1884}
1885
1886// aten::cos(Tensor self) -> Tensor
1887inline at::Tensor cos(const at::Tensor & self) {
1888 return at::_ops::cos::call(self);
1889}
1890
1891// aten::cos_(Tensor(a!) self) -> Tensor(a!)
1892inline at::Tensor & cos_(at::Tensor & self) {
1893 return at::_ops::cos_::call(self);
1894}
1895
1896// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1897inline at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self) {
1898 return at::_ops::cos_out::call(self, out);
1899}
1900// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1901inline at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out) {
1902 return at::_ops::cos_out::call(self, out);
1903}
1904
1905// aten::cosh(Tensor self) -> Tensor
1906inline at::Tensor cosh(const at::Tensor & self) {
1907 return at::_ops::cosh::call(self);
1908}
1909
1910// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
1911inline at::Tensor & cosh_(at::Tensor & self) {
1912 return at::_ops::cosh_::call(self);
1913}
1914
1915// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1916inline at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self) {
1917 return at::_ops::cosh_out::call(self, out);
1918}
1919// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1920inline at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out) {
1921 return at::_ops::cosh_out::call(self, out);
1922}
1923
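// [Editorial sketch, not generated code] Every `op.out` schema is exposed
// twice: `op_out(out, ...)` takes the result tensor first and keeps default
// arguments, while `op_outf(..., out)` follows schema order with no defaults.
// Both forward to the same underlying call.
inline void example_cos_out_conventions(const at::Tensor & self, at::Tensor & out) {
  at::cos_out(out, self);  // out-first spelling
  at::cos_outf(self, out); // schema-order spelling; identical behavior
}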
1924// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
1925inline at::Tensor cosine_embedding_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean) {
1926 return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
1927}
1928
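// [Editorial sketch, not generated code] For cosine_embedding_loss, target
// holds +1 for pairs that should be similar and -1 for dissimilar pairs; the
// margin only affects the dissimilar branch.
inline at::Tensor example_cosine_embedding_loss_usage(const at::Tensor & x1, const at::Tensor & x2, const at::Tensor & target) {
  return at::cosine_embedding_loss(x1, x2, target, /*margin=*/0.5);
}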
1929// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
1930inline at::Tensor count_nonzero(const at::Tensor & self, at::IntArrayRef dim) {
1931 return at::_ops::count_nonzero_dim_IntList::call(self, dim);
1932}
1933
1934// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor
1935inline at::Tensor count_nonzero(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt) {
1936 return at::_ops::count_nonzero::call(self, dim);
1937}
1938
1939// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
1940inline at::Tensor cov(const at::Tensor & self, int64_t correction=1, const c10::optional<at::Tensor> & fweights={}, const c10::optional<at::Tensor> & aweights={}) {
1941 return at::_ops::cov::call(self, correction, fweights, aweights);
1942}
1943
1944// aten::corrcoef(Tensor self) -> Tensor
1945inline at::Tensor corrcoef(const at::Tensor & self) {
1946 return at::_ops::corrcoef::call(self);
1947}
1948
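// [Editorial sketch, not generated code] cov treats rows as variables and
// columns as observations; correction=1 gives the unbiased estimator, and
// corrcoef is the normalized counterpart with no extra arguments.
inline at::Tensor example_cov_usage(const at::Tensor & observations) {
  // observations: (variables, samples), assumed 2-D.
  return at::cov(observations, /*correction=*/1);
}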
1949// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
1950inline at::Tensor cudnn_affine_grid_generator(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
1951 return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
1952}
1953
1954// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta
1955inline at::Tensor cudnn_affine_grid_generator_backward(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
1956 return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W);
1957}
1958
1959// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
1960inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
1961 return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
1962}
1963
1964// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)
1965inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
1966 return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
1967}
1968
1969// aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
1970inline at::Tensor cudnn_convolution(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
1971 return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
1972}
1973
1974// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor
1975inline at::Tensor cudnn_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
1976 return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
1977}
1978
1979// aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor
1980inline at::Tensor _mps_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
1981 return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
1982}
1983
1984// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)
1985inline ::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {
1986 return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
1987}
1988
1989// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
1990inline at::Tensor cudnn_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
1991 return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
1992}
1993
1994// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
1995inline at::Tensor cudnn_convolution_add_relu(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
1996 return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
1997}
1998
1999// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output
2000inline at::Tensor cudnn_grid_sampler(const at::Tensor & self, const at::Tensor & grid) {
2001 return at::_ops::cudnn_grid_sampler::call(self, grid);
2002}
2003
2004// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
2005inline ::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
2006 return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
2007}
2008
2009// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
2010inline ::std::tuple<at::Tensor,at::Tensor> cummax(const at::Tensor & self, int64_t dim) {
2011 return at::_ops::cummax::call(self, dim);
2012}
2013
2014// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2015inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) {
2016 return at::_ops::cummax_out::call(self, dim, values, indices);
2017}
2018// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2019inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
2020 return at::_ops::cummax_out::call(self, dim, values, indices);
2021}
2022
2023// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
2024inline ::std::tuple<at::Tensor,at::Tensor> cummax(const at::Tensor & self, at::Dimname dim) {
2025 return at::_ops::cummax_dimname::call(self, dim);
2026}
2027
2028// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2029inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) {
2030 return at::_ops::cummax_dimname_out::call(self, dim, values, indices);
2031}
2032// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2033inline ::std::tuple<at::Tensor &,at::Tensor &> cummax_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
2034 return at::_ops::cummax_dimname_out::call(self, dim, values, indices);
2035}
2036
2037// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
2038inline void _cummax_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
2039 return at::_ops::_cummax_helper::call(self, values, indices, dim);
2040}
2041
2042// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
2043inline ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, int64_t dim) {
2044 return at::_ops::cummin::call(self, dim);
2045}
2046
2047// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2048inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) {
2049 return at::_ops::cummin_out::call(self, dim, values, indices);
2050}
2051// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2052inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
2053 return at::_ops::cummin_out::call(self, dim, values, indices);
2054}
2055
2056// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
2057inline ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, at::Dimname dim) {
2058 return at::_ops::cummin_dimname::call(self, dim);
2059}
2060
2061// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2062inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) {
2063 return at::_ops::cummin_dimname_out::call(self, dim, values, indices);
2064}
2065// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
2066inline ::std::tuple<at::Tensor &,at::Tensor &> cummin_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
2067 return at::_ops::cummin_dimname_out::call(self, dim, values, indices);
2068}
2069
2070// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
2071inline void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
2072 return at::_ops::_cummin_helper::call(self, values, indices, dim);
2073}
2074
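// [Editorial sketch, not generated code] cummax/cummin return both the
// running extreme along `dim` and the index at which it was attained.
inline at::Tensor example_cummax_usage(const at::Tensor & self) {
  ::std::tuple<at::Tensor,at::Tensor> result = at::cummax(self, /*dim=*/0);
  return std::get<0>(result); // running maxima; std::get<1>(result) holds indices
}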
2075// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
2076inline at::Tensor cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
2077 return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
2078}
2079
2080// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
2081inline at::Tensor cumprod(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2082 return at::_ops::cumprod::call(self, dim, dtype);
2083}
2084
2085// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2086inline at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2087 return at::_ops::cumprod_out::call(self, dim, dtype, out);
2088}
2089// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2090inline at::Tensor & cumprod_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
2091 return at::_ops::cumprod_out::call(self, dim, dtype, out);
2092}
2093
2094// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
2095inline at::Tensor cumprod(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2096 return at::_ops::cumprod_dimname::call(self, dim, dtype);
2097}
2098
2099// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2100inline at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2101 return at::_ops::cumprod_dimname_out::call(self, dim, dtype, out);
2102}
2103// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2104inline at::Tensor & cumprod_outf(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
2105 return at::_ops::cumprod_dimname_out::call(self, dim, dtype, out);
2106}
2107
2108// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
2109inline at::Tensor cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
2110 return at::_ops::cumprod_backward::call(grad, input, dim, output);
2111}
2112
2113// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
2114inline at::Tensor cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2115 return at::_ops::cumsum::call(self, dim, dtype);
2116}
2117
2118// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2119inline at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2120 return at::_ops::cumsum_out::call(self, dim, dtype, out);
2121}
2122// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2123inline at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
2124 return at::_ops::cumsum_out::call(self, dim, dtype, out);
2125}
2126
2127// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
2128inline at::Tensor cumsum(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2129 return at::_ops::cumsum_dimname::call(self, dim, dtype);
2130}
2131
2132// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2133inline at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
2134 return at::_ops::cumsum_dimname_out::call(self, dim, dtype, out);
2135}
2136// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
2137inline at::Tensor & cumsum_outf(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
2138 return at::_ops::cumsum_dimname_out::call(self, dim, dtype, out);
2139}
2140
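// [Editorial sketch, not generated code] The optional dtype upcasts the
// accumulation, which avoids overflow when cumulating small integral types.
inline at::Tensor example_cumsum_usage(const at::Tensor & self) {
  return at::cumsum(self, /*dim=*/0, /*dtype=*/at::kLong);
}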
2141// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
2142inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
2143 return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
2144}
2145
2146// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
2147inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) {
2148 return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
2149}
2150
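// [Editorial sketch, not generated code] cumulative_trapezoid is the running
// trapezoidal-rule integral of y, taken against either sample points x or a
// uniform scalar spacing dx.
inline at::Tensor example_cumulative_trapezoid_usage(const at::Tensor & y) {
  return at::cumulative_trapezoid(y, /*dx=*/0.5, /*dim=*/-1);
}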
2151// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
2152inline at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false) {
2153 return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
2154}
2155
2156// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
2157inline at::Tensor ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false) {
2158 return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
2159}
2160
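// [Editorial sketch, not generated code] Shapes follow the usual CTC
// convention: log_probs is (T, N, C) log-softmax output, targets holds the
// label sequences, and the two length tensors give per-sequence sizes.
// zero_infinity=true zeroes losses from infeasible alignments.
inline at::Tensor example_ctc_loss_usage(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths) {
  return at::ctc_loss(log_probs, targets, input_lengths, target_lengths, /*blank=*/0, at::Reduction::Mean, /*zero_infinity=*/true);
}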
2161// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
2162inline ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) {
2163 return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
2164}
2165
2166// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
2167inline ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false) {
2168 return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
2169}
2170
2171// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
2172inline at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
2173 return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
2174}
2175
2176// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
2177inline at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
2178 return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
2179}
2180
2181// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
2182inline at::Tensor diag_embed(const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) {
2183 return at::_ops::diag_embed::call(self, offset, dim1, dim2);
2184}
2185
2186// aten::diagflat(Tensor self, int offset=0) -> Tensor
2187inline at::Tensor diagflat(const at::Tensor & self, int64_t offset=0) {
2188 return at::_ops::diagflat::call(self, offset);
2189}
2190
2191// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
2192inline at::Tensor diagonal(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
2193 return at::_ops::diagonal::call(self, offset, dim1, dim2);
2194}
2195
2196// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)
2197inline at::Tensor linalg_diagonal(const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) {
2198 return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
2199}
2200
2201// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
2202inline at::Tensor diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) {
2203 return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
2204}
2205
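// [Editorial sketch, not generated code] diagonal returns a view into self;
// diag_embed goes the other way, scattering the last dimension of its input
// onto the diagonal of a freshly allocated tensor.
inline at::Tensor example_diagonal_roundtrip(const at::Tensor & self) {
  at::Tensor d = at::diagonal(self, /*offset=*/0, /*dim1=*/-2, /*dim2=*/-1); // view, no copy
  return at::diag_embed(d); // re-embeds d as diagonal matrices
}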
2206// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
2207inline at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
2208 return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
2209}
2210namespace symint {
2211 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2212 at::Tensor diagonal_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
2213 return at::_ops::diagonal_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2);
2214 }
2215}
2216
2217// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor
2218inline at::Tensor diagonal_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
2219 return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
2220}
2221namespace symint {
2222 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2223 at::Tensor diagonal_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
2224 return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
2225 }
2226}
2227
2228// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
2229inline at::Tensor diff(const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={}) {
2230 return at::_ops::diff::call(self, n, dim, prepend, append);
2231}
2232
2233// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
2234inline at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={}) {
2235 return at::_ops::diff_out::call(self, n, dim, prepend, append, out);
2236}
2237// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
2238inline at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out) {
2239 return at::_ops::diff_out::call(self, n, dim, prepend, append, out);
2240}
2241
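// [Editorial sketch, not generated code] diff computes the n-th order forward
// difference along dim; prepend/append extend the input before differencing
// so the output length can match the input's.
inline at::Tensor example_diff_usage(const at::Tensor & self) {
  return at::diff(self, /*n=*/1, /*dim=*/-1);
}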
2242// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]
2243inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, const c10::optional<at::Scalar> & spacing=c10::nullopt, c10::optional<int64_t> dim=c10::nullopt, int64_t edge_order=1) {
2244 return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
2245}
2246
2247// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]
2248inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1) {
2249 return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
2250}
2251
2252// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]
2253inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1) {
2254 return at::_ops::gradient_array::call(self, dim, edge_order);
2255}
2256
2257// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
2258inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim=c10::nullopt, int64_t edge_order=1) {
2259 return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
2260}
2261
2262// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]
2263inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order=1) {
2264 return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
2265}
2266
2267// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
2268inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim=c10::nullopt, int64_t edge_order=1) {
2269 return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
2270}
2271
2272// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]
2273inline ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1) {
2274 return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
2275}
2276
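// [Editorial sketch, not generated code] gradient estimates d(self)/dx with
// second-order central differences (lower order at the boundaries for
// edge_order=1) and returns one tensor per requested dimension.
inline ::std::vector<at::Tensor> example_gradient_usage(const at::Tensor & self) {
  // Unit spacing along dimension 0 via the int[] dim overload.
  return at::gradient(self, at::IntArrayRef{0}, /*edge_order=*/1);
}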
2277// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
2278inline at::Tensor div(const at::Tensor & self, const at::Tensor & other) {
2279 return at::_ops::div_Tensor::call(self, other);
2280}
2281
2282// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2283inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
2284 return at::_ops::div_out::call(self, other, out);
2285}
2286// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2287inline at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2288 return at::_ops::div_out::call(self, other, out);
2289}
2290
2291// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
2292inline at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
2293 return at::_ops::div_Tensor_mode::call(self, other, rounding_mode);
2294}
2295
2296// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
2297inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
2298 return at::_ops::div_out_mode::call(self, other, rounding_mode, out);
2299}
2300// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
2301inline at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
2302 return at::_ops::div_out_mode::call(self, other, rounding_mode, out);
2303}
2304
2305// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
2306inline at::Tensor div(const at::Tensor & self, const at::Scalar & other) {
2307 return at::_ops::div_Scalar::call(self, other);
2308}
2309
2310// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
2311inline at::Tensor div(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
2312 return at::_ops::div_Scalar_mode::call(self, other, rounding_mode);
2313}
2314
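// [Editorial sketch, not generated code] rounding_mode selects true division
// (c10::nullopt), truncation toward zero ("trunc"), or floor division
// ("floor"); divide below is an alias with the same overload set.
inline at::Tensor example_div_floor(const at::Tensor & self, const at::Tensor & other) {
  return at::div(self, other, /*rounding_mode=*/"floor");
}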
2315// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
2316inline at::Tensor divide(const at::Tensor & self, const at::Tensor & other) {
2317 return at::_ops::divide_Tensor::call(self, other);
2318}
2319
2320// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2321inline at::Tensor & divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
2322 return at::_ops::divide_out::call(self, other, out);
2323}
2324// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2325inline at::Tensor & divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2326 return at::_ops::divide_out::call(self, other, out);
2327}
2328
2329// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
2330inline at::Tensor divide(const at::Tensor & self, const at::Scalar & other) {
2331 return at::_ops::divide_Scalar::call(self, other);
2332}
2333
2334// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
2335inline at::Tensor divide(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
2336 return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode);
2337}
2338
2339// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
2340inline at::Tensor & divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
2341 return at::_ops::divide_out_mode::call(self, other, rounding_mode, out);
2342}
2343// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
2344inline at::Tensor & divide_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
2345 return at::_ops::divide_out_mode::call(self, other, rounding_mode, out);
2346}
2347
2348// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
2349inline at::Tensor divide(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
2350 return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode);
2351}
2352
2353// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
2354inline at::Tensor true_divide(const at::Tensor & self, const at::Tensor & other) {
2355 return at::_ops::true_divide_Tensor::call(self, other);
2356}
2357
2358// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2359inline at::Tensor & true_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
2360 return at::_ops::true_divide_out::call(self, other, out);
2361}
2362// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2363inline at::Tensor & true_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2364 return at::_ops::true_divide_out::call(self, other, out);
2365}
2366
2367// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
2368inline at::Tensor true_divide(const at::Tensor & self, const at::Scalar & other) {
2369 return at::_ops::true_divide_Scalar::call(self, other);
2370}
2371
2372// aten::dot(Tensor self, Tensor tensor) -> Tensor
2373inline at::Tensor dot(const at::Tensor & self, const at::Tensor & tensor) {
2374 return at::_ops::dot::call(self, tensor);
2375}
2376
2377// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
2378inline at::Tensor & dot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor) {
2379 return at::_ops::dot_out::call(self, tensor, out);
2380}
2381// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
2382inline at::Tensor & dot_outf(const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
2383 return at::_ops::dot_out::call(self, tensor, out);
2384}
2385
2386// aten::vdot(Tensor self, Tensor other) -> Tensor
2387inline at::Tensor vdot(const at::Tensor & self, const at::Tensor & other) {
2388 return at::_ops::vdot::call(self, other);
2389}
2390
2391// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2392inline at::Tensor & vdot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
2393 return at::_ops::vdot_out::call(self, other, out);
2394}
2395// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
2396inline at::Tensor & vdot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
2397 return at::_ops::vdot_out::call(self, other, out);
2398}
2399
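// [Editorial sketch, not generated code] dot is the plain 1-D inner product;
// vdot conjugates self first, which differs only for complex inputs.
inline at::Tensor example_inner_products(const at::Tensor & a, const at::Tensor & b) {
  return at::vdot(a, b); // equal to at::dot(a, b) for real-valued tensors
}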
2400// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor
2401inline at::Tensor einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path=c10::nullopt) {
2402 return at::_ops::einsum::call(equation, tensors, path);
2403}
2404
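// [Editorial sketch, not generated code] einsum contracts named subscripts;
// the optional path overrides the default left-to-right contraction order.
inline at::Tensor example_einsum_bmm(const at::Tensor & a, const at::Tensor & b) {
  // Batched matrix multiply: (b,i,k) x (b,k,j) -> (b,i,j).
  return at::einsum("bik,bkj->bij", {a, b});
}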
2405// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
2406inline at::Tensor embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
2407 return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
2408}
2409namespace symint {
2410 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2411 at::Tensor embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
2412 return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
2413 }
2414}
2415
2416// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
2417inline at::Tensor embedding_symint(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
2418 return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
2419}
2420namespace symint {
2421 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2422 at::Tensor embedding(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
2423 return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
2424 }
2425}
2426
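// [Editorial sketch, not generated code] embedding is a row gather: indices
// select rows of weight, and padding_idx marks a row whose gradient stays
// zero during training.
inline at::Tensor example_embedding_usage(const at::Tensor & weight, const at::Tensor & indices) {
  // weight: (num_embeddings, embedding_dim); indices: any integer shape.
  return at::embedding(weight, indices, /*padding_idx=*/-1);
}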
2427// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
2428inline at::Tensor embedding_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
2429 return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
2430}
2431namespace symint {
2432 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2433 at::Tensor embedding_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
2434 return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
2435 }
2436}
2437
2438// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor
2439inline at::Tensor embedding_backward_symint(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
2440 return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
2441}
2442namespace symint {
2443 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2444 at::Tensor embedding_backward(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
2445 return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
2446 }
2447}
2448
2449// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
2450inline at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
2451 return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
2452}
2453namespace symint {
2454 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2455 at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
2456 return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
2457 }
2458}
2459
2460// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor
2461inline at::Tensor embedding_dense_backward_symint(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
2462 return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
2463}
2464namespace symint {
2465 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2466 at::Tensor embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
2467 return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
2468 }
2469}
2470
2471// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)
2472inline at::Tensor & embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
2473 return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type);
2474}
2475
2476// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor
2477inline at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
2478 return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
2479}
2480
2481// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
2482inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) {
2483 return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
2484}
2485
2486// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)
2487inline ::std::tuple<at::Tensor,at::Tensor> _rowwise_prune(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
2488 return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype);
2489}
2490
2491// aten::row_stack(Tensor[] tensors) -> Tensor
2492inline at::Tensor row_stack(at::TensorList tensors) {
2493 return at::_ops::row_stack::call(tensors);
2494}
2495
2496// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
2497inline at::Tensor & row_stack_out(at::Tensor & out, at::TensorList tensors) {
2498 return at::_ops::row_stack_out::call(tensors, out);
2499}
2500// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
2501inline at::Tensor & row_stack_outf(at::TensorList tensors, at::Tensor & out) {
2502 return at::_ops::row_stack_out::call(tensors, out);
2503}
2504
2505// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
2506inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false) {
2507 return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
2508}
2509
2510// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
2511inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
2512 return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
2513}
2514
2515// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
2516inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) {
2517 return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
2518}
2519
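// [Editorial sketch, not generated code] embedding_bag fuses a lookup with a
// per-bag reduction: offsets marks where each bag starts in the flat indices
// tensor, and mode 0/1/2 selects sum/mean/max.
inline at::Tensor example_embedding_bag_usage(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets) {
  auto result = at::embedding_bag(weight, indices, offsets, /*scale_grad_by_freq=*/false, /*mode=*/1);
  return std::get<0>(result); // (num_bags, embedding_dim) mean-pooled output
}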
2520// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
2521inline at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2522 return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
2523}
2524namespace symint {
2525 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2526 at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2527 return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
2528 }
2529}
2530
2531// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
2532inline at::Tensor _embedding_bag_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2533 return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
2534}
2535namespace symint {
2536 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2537 at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2538 return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
2539 }
2540}
2541
2542// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
2543inline at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2544 return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2545}
2546namespace symint {
2547 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2548 at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2549 return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2550 }
2551}
2552
2553// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
2554inline at::Tensor _embedding_bag_sparse_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2555 return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2556}
2557namespace symint {
2558 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2559 at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2560 return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2561 }
2562}
2563
2564// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
2565inline at::Tensor _embedding_bag_dense_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2566 return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2567}
2568namespace symint {
2569 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2570 at::Tensor _embedding_bag_dense_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2571 return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2572 }
2573}
2574
2575// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
2576inline at::Tensor _embedding_bag_dense_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2577 return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2578}
2579namespace symint {
2580 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2581 at::Tensor _embedding_bag_dense_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
2582 return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
2583 }
2584}
2585
2586// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
2587inline at::Tensor _embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) {
2588 return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
2589}
2590
2591// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2592inline at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2593 return at::_ops::empty_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2594}
2595// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2596inline at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2597 return at::_ops::empty_names::call(size, names, dtype, layout, device, pin_memory, memory_format);
2598}
2599
2600// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2601inline at::Tensor empty(at::IntArrayRef size, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2602 return at::_ops::empty_memory_format::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2603}
2604namespace symint {
2605 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2606 at::Tensor empty(at::IntArrayRef size, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2607 return at::_ops::empty_memory_format::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2608 }
2609}
2610
2611// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2612inline at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2613 return at::_ops::empty_memory_format::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
2614}
2615namespace symint {
2616 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2617 at::Tensor empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2618 return at::_ops::empty_memory_format::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, memory_format);
2619 }
2620}
2621
2622// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2623inline at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2624 return at::_ops::empty_memory_format::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2625}
2626namespace symint {
2627 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2628 at::Tensor empty(c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2629 return at::_ops::empty_memory_format::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2630 }
2631}
2632
2633// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2634inline at::Tensor empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2635 return at::_ops::empty_memory_format::call(size, dtype, layout, device, pin_memory, memory_format);
2636}
2637namespace symint {
2638 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2639 at::Tensor empty(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2640 return at::_ops::empty_memory_format::call(size, dtype, layout, device, pin_memory, memory_format);
2641 }
2642}
2643
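// A hedged sketch of the `empty` overload set above: `at::empty` allocates
// storage without initializing it, and dtype/layout/device can be supplied
// either bundled in at::TensorOptions or as the unpacked optionals. Shapes
// and options below are arbitrary:
//
//   at::Tensor a = at::empty({2, 3});                       // default options
//   at::Tensor b = at::empty({2, 3}, at::dtype(at::kLong)); // TensorOptions form
//   at::Tensor c = at::empty({2, 3}, at::kLong, at::kStrided, at::kCPU,
//                            /*pin_memory=*/false, c10::nullopt);  // unpacked form
//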
2644namespace symint {
2645 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2646 at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) {
2647 return at::_ops::new_empty::call(self, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2648 }
2649}
2650
2651namespace symint {
2652 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2653 at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2654 return at::_ops::new_empty::call(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
2655 }
2656}
2657
2658namespace symint {
2659 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2660 at::Tensor new_empty(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) {
2661 return at::_ops::new_empty::call(self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2662 }
2663}
2664
2665namespace symint {
2666 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2667 at::Tensor new_empty(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2668 return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
2669 }
2670}
2671
2672namespace symint {
2673 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2674 at::Tensor new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) {
2675 return at::_ops::new_empty_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2676 }
2677}
2678
2679namespace symint {
2680 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2681 at::Tensor new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2682 return at::_ops::new_empty_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
2683 }
2684}
2685
2686namespace symint {
2687 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2688 at::Tensor new_empty_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) {
2689 return at::_ops::new_empty_strided::call(self, size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2690 }
2691}
2692
2693namespace symint {
2694 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2695 at::Tensor new_empty_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2696 return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory);
2697 }
2698}
2699
2700namespace symint {
2701 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2702 at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
2703 return at::_ops::new_full::call(self, c10::fromIntArrayRefSlow(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2704 }
2705}
2706
2707namespace symint {
2708 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2709 at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2710 return at::_ops::new_full::call(self, c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
2711 }
2712}
2713
2714namespace symint {
2715 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2716 at::Tensor new_full(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
2717 return at::_ops::new_full::call(self, size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2718 }
2719}
2720
2721namespace symint {
2722 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2723 at::Tensor new_full(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2724 return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory);
2725 }
2726}
2727
2728namespace symint {
2729 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2730 at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) {
2731 return at::_ops::new_zeros::call(self, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2732 }
2733}
2734
2735namespace symint {
2736 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2737 at::Tensor new_zeros(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2738 return at::_ops::new_zeros::call(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
2739 }
2740}
2741
2742namespace symint {
2743 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2744 at::Tensor new_zeros(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) {
2745 return at::_ops::new_zeros::call(self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2746 }
2747}
2748
2749namespace symint {
2750 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2751 at::Tensor new_zeros(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2752 return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory);
2753 }
2754}
2755
2756namespace symint {
2757 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2758 at::Tensor new_ones(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) {
2759 return at::_ops::new_ones::call(self, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2760 }
2761}
2762
2763namespace symint {
2764 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2765 at::Tensor new_ones(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2766 return at::_ops::new_ones::call(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
2767 }
2768}
2769
2770namespace symint {
2771 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2772 at::Tensor new_ones(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) {
2773 return at::_ops::new_ones::call(self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2774 }
2775}
2776
2777namespace symint {
2778 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2779 at::Tensor new_ones(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2780 return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory);
2781 }
2782}
2783
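// The `new_empty` / `new_empty_strided` / `new_full` / `new_zeros` /
// `new_ones` entries above are the free-function forms of the Tensor
// `new_*` factories: `self` is passed only so the result inherits its
// dtype/layout/device when no options are given. Hedged sketch:
//
//   at::Tensor ref = at::empty({4}, at::dtype(at::kHalf));
//   // same dtype/device as `ref`, new shape, filled with 7
//   at::Tensor t = at::symint::new_full<int64_t>(ref, {2, 2}, 7);
//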
2784// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
2785inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
2786 return at::_ops::_empty_affine_quantized::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2787}
2788// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
2789inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
2790 return at::_ops::_empty_affine_quantized::call(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
2791}
2792
2793// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
2794inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
2795 return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2796}
2797// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
2798inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2799 return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
2800}
2801
2802namespace symint {
2803 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2804 const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2805 return at::_ops::resize_::call(self, c10::fromIntArrayRefSlow(size), memory_format);
2806 }
2807}
2808
2809namespace symint {
2810 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2811 const at::Tensor & resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2812 return at::_ops::resize_::call(self, size, memory_format);
2813 }
2814}
2815
2816// aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)
2817inline const at::Tensor & _resize_output_(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
2818 return at::_ops::_resize_output_::call(self, size, device);
2819}
2820
2821// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2822inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2823 return at::_ops::empty_quantized::call(size, qtensor, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2824}
2825// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2826inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2827 return at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
2828}
2829
2830// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
2831inline at::Tensor & empty_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2832 return at::_ops::empty_out::call(c10::fromIntArrayRefSlow(size), memory_format, out);
2833}
2834namespace symint {
2835 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2836 at::Tensor & empty_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2837 return at::_ops::empty_out::call(c10::fromIntArrayRefSlow(size), memory_format, out);
2838 }
2839}
2840
2841// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
2842inline at::Tensor & empty_outf(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
2843 return at::_ops::empty_out::call(c10::fromIntArrayRefSlow(size), memory_format, out);
2844}
2845namespace symint {
2846 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2847 at::Tensor & empty_outf(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
2848 return at::_ops::empty_out::call(c10::fromIntArrayRefSlow(size), memory_format, out);
2849 }
2850}
2851
2852// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
2853inline at::Tensor & empty_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2854 return at::_ops::empty_out::call(size, memory_format, out);
2855}
2856namespace symint {
2857 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2858 at::Tensor & empty_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2859 return at::_ops::empty_out::call(size, memory_format, out);
2860 }
2861}
2862
2863// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
2864inline at::Tensor & empty_symint_outf(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
2865 return at::_ops::empty_out::call(size, memory_format, out);
2866}
2867namespace symint {
2868 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2869 at::Tensor & empty_outf(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
2870 return at::_ops::empty_out::call(size, memory_format, out);
2871 }
2872}
2873
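// NOTE on the paired `*_out` / `*_outf` spellings used throughout this
// header: both forward to the same underlying op; `*_out` takes the
// destination tensor as the first argument, while `*_outf` keeps the schema
// argument order with `out` last. Hedged sketch with `empty_out`:
//
//   at::Tensor dst = at::empty({0});
//   at::empty_out(dst, {2, 3});                  // out-first spelling
//   at::empty_outf({2, 3}, c10::nullopt, dst);   // schema-order spelling
//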
2874// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2875inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
2876 return at::_ops::empty_like::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
2877}
2878// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
2879inline at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
2880 return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format);
2881}
2882
2883// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2884inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) {
2885 return at::_ops::empty_strided::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2886}
2887namespace symint {
2888 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2889 at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) {
2890 return at::_ops::empty_strided::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2891 }
2892}
2893
2894// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2895inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2896 return at::_ops::empty_strided::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
2897}
2898namespace symint {
2899 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
2900 at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2901 return at::_ops::empty_strided::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory);
2902 }
2903}
2904
2905// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2906inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) {
2907 return at::_ops::empty_strided::call(size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2908}
2909namespace symint {
2910 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2911 at::Tensor empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) {
2912 return at::_ops::empty_strided::call(size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
2913 }
2914}
2915
2916// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2917inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2918 return at::_ops::empty_strided::call(size, stride, dtype, layout, device, pin_memory);
2919}
2920namespace symint {
2921 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
2922 at::Tensor empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
2923 return at::_ops::empty_strided::call(size, stride, dtype, layout, device, pin_memory);
2924 }
2925}
2926
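// Hedged `empty_strided` sketch: size and stride are given independently, so
// arbitrary (including overlapping) layouts can be constructed; keeping them
// consistent is the caller's responsibility. Values are illustrative:
//
//   // 2x3 row-major layout: strides {3, 1}
//   at::Tensor t = at::empty_strided({2, 3}, {3, 1}, at::dtype(at::kFloat));
//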
2927// aten::erf(Tensor self) -> Tensor
2928inline at::Tensor erf(const at::Tensor & self) {
2929 return at::_ops::erf::call(self);
2930}
2931
2932// aten::erf_(Tensor(a!) self) -> Tensor(a!)
2933inline at::Tensor & erf_(at::Tensor & self) {
2934 return at::_ops::erf_::call(self);
2935}
2936
2937// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2938inline at::Tensor & erf_out(at::Tensor & out, const at::Tensor & self) {
2939 return at::_ops::erf_out::call(self, out);
2940}
2941// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2942inline at::Tensor & erf_outf(const at::Tensor & self, at::Tensor & out) {
2943 return at::_ops::erf_out::call(self, out);
2944}
2945
2946// aten::erfc(Tensor self) -> Tensor
2947inline at::Tensor erfc(const at::Tensor & self) {
2948 return at::_ops::erfc::call(self);
2949}
2950
2951// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
2952inline at::Tensor & erfc_(at::Tensor & self) {
2953 return at::_ops::erfc_::call(self);
2954}
2955
2956// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2957inline at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self) {
2958 return at::_ops::erfc_out::call(self, out);
2959}
2960// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2961inline at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out) {
2962 return at::_ops::erfc_out::call(self, out);
2963}
2964
2965// aten::exp(Tensor self) -> Tensor
2966inline at::Tensor exp(const at::Tensor & self) {
2967 return at::_ops::exp::call(self);
2968}
2969
2970// aten::exp_(Tensor(a!) self) -> Tensor(a!)
2971inline at::Tensor & exp_(at::Tensor & self) {
2972 return at::_ops::exp_::call(self);
2973}
2974
2975// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2976inline at::Tensor & exp_out(at::Tensor & out, const at::Tensor & self) {
2977 return at::_ops::exp_out::call(self, out);
2978}
2979// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2980inline at::Tensor & exp_outf(const at::Tensor & self, at::Tensor & out) {
2981 return at::_ops::exp_out::call(self, out);
2982}
2983
2984// aten::exp2(Tensor self) -> Tensor
2985inline at::Tensor exp2(const at::Tensor & self) {
2986 return at::_ops::exp2::call(self);
2987}
2988
2989// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
2990inline at::Tensor & exp2_(at::Tensor & self) {
2991 return at::_ops::exp2_::call(self);
2992}
2993
2994// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2995inline at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self) {
2996 return at::_ops::exp2_out::call(self, out);
2997}
2998// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
2999inline at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out) {
3000 return at::_ops::exp2_out::call(self, out);
3001}
3002
3003// aten::expm1(Tensor self) -> Tensor
3004inline at::Tensor expm1(const at::Tensor & self) {
3005 return at::_ops::expm1::call(self);
3006}
3007
3008// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
3009inline at::Tensor & expm1_(at::Tensor & self) {
3010 return at::_ops::expm1_::call(self);
3011}
3012
3013// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
3014inline at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self) {
3015 return at::_ops::expm1_out::call(self, out);
3016}
3017// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
3018inline at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out) {
3019 return at::_ops::expm1_out::call(self, out);
3020}
3021
3022namespace symint {
3023 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3024 at::Tensor expand(const at::Tensor & self, at::IntArrayRef size, bool implicit=false) {
3025 return at::_ops::expand::call(self, c10::fromIntArrayRefSlow(size), implicit);
3026 }
3027}
3028
3029namespace symint {
3030 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3031 at::Tensor expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) {
3032 return at::_ops::expand::call(self, size, implicit);
3033 }
3034}
3035
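// Hedged sketch of the `expand` entries above: size-1 dimensions are
// broadcast to the requested extent without copying (the result is a view
// with stride 0 in the expanded dimension), and `implicit` is an internal
// flag for traced broadcasting that callers normally leave false:
//
//   at::Tensor v = at::ones({3, 1});
//   at::Tensor e = at::symint::expand<int64_t>(v, {3, 4});  // view, no copy
//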
3036// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3037inline at::Tensor eye(int64_t n, at::TensorOptions options={}) {
3038 return at::_ops::eye::call(n, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3039}
3040// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3041inline at::Tensor eye(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3042 return at::_ops::eye::call(n, dtype, layout, device, pin_memory);
3043}
3044
3045// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3046inline at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options={}) {
3047 return at::_ops::eye_m::call(n, m, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3048}
3049// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3050inline at::Tensor eye(int64_t n, int64_t m, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3051 return at::_ops::eye_m::call(n, m, dtype, layout, device, pin_memory);
3052}
3053
3054// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)
3055inline at::Tensor & eye_out(at::Tensor & out, int64_t n) {
3056 return at::_ops::eye_out::call(n, out);
3057}
3058// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)
3059inline at::Tensor & eye_outf(int64_t n, at::Tensor & out) {
3060 return at::_ops::eye_out::call(n, out);
3061}
3062
3063// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
3064inline at::Tensor & eye_out(at::Tensor & out, int64_t n, int64_t m) {
3065 return at::_ops::eye_m_out::call(n, m, out);
3066}
3067// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
3068inline at::Tensor & eye_outf(int64_t n, int64_t m, at::Tensor & out) {
3069 return at::_ops::eye_m_out::call(n, m, out);
3070}
3071
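// Hedged `eye` sketch: the single-size form is square, the `.m` overload is
// rectangular, and the `_out` forms write into an existing tensor:
//
//   at::Tensor i3  = at::eye(3);     // 3x3 identity
//   at::Tensor i23 = at::eye(2, 3);  // 2x3, ones on the main diagonal
//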
3072// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
3073inline at::Tensor flatten(const at::Tensor & self, int64_t start_dim=0, int64_t end_dim=-1) {
3074 return at::_ops::flatten_using_ints::call(self, start_dim, end_dim);
3075}
3076
3077// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
3078inline at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
3079 return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim);
3080}
3081
3082// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
3083inline at::Tensor flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
3084 return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim);
3085}
3086
3087// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
3088inline at::Tensor flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
3089 return at::_ops::flatten_DimnameList::call(self, dims, out_dim);
3090}
3091
3092// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
3093inline at::Tensor unflatten(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
3094 return at::_ops::unflatten_int::call(self, dim, sizes);
3095}
3096
3097// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
3098inline at::Tensor unflatten(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
3099 return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
3100}
3101
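// Hedged sketch of flatten/unflatten round-tripping (dimension values are
// illustrative): `flatten` merges dims [start_dim, end_dim] into one, and
// `unflatten` splits a dim back into the given sizes:
//
//   at::Tensor x = at::ones({2, 3, 4});
//   at::Tensor f = at::flatten(x, /*start_dim=*/1, /*end_dim=*/2);  // {2, 12}
//   at::Tensor u = at::unflatten(f, /*dim=*/1, {3, 4});             // {2, 3, 4}
//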
3102// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor
3103inline at::Tensor fill(const at::Tensor & self, const at::Scalar & value) {
3104 return at::_ops::fill_Scalar::call(self, value);
3105}
3106
3107// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor
3108inline at::Tensor fill(const at::Tensor & self, const at::Tensor & value) {
3109 return at::_ops::fill_Tensor::call(self, value);
3110}
3111
3112// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
3113inline at::Tensor & fill_(at::Tensor & self, const at::Scalar & value) {
3114 return at::_ops::fill__Scalar::call(self, value);
3115}
3116
3117// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
3118inline at::Tensor & fill_(at::Tensor & self, const at::Tensor & value) {
3119 return at::_ops::fill__Tensor::call(self, value);
3120}
3121
3122// aten::floor(Tensor self) -> Tensor
3123inline at::Tensor floor(const at::Tensor & self) {
3124 return at::_ops::floor::call(self);
3125}
3126
3127// aten::floor_(Tensor(a!) self) -> Tensor(a!)
3128inline at::Tensor & floor_(at::Tensor & self) {
3129 return at::_ops::floor_::call(self);
3130}
3131
3132// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
3133inline at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self) {
3134 return at::_ops::floor_out::call(self, out);
3135}
3136// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
3137inline at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out) {
3138 return at::_ops::floor_out::call(self, out);
3139}
3140
3141// aten::floor_divide(Tensor self, Tensor other) -> Tensor
3142inline at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) {
3143 return at::_ops::floor_divide::call(self, other);
3144}
3145
3146// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3147inline at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
3148 return at::_ops::floor_divide_out::call(self, other, out);
3149}
3150// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3151inline at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3152 return at::_ops::floor_divide_out::call(self, other, out);
3153}
3154
3155// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
3156inline at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other) {
3157 return at::_ops::floor_divide_Scalar::call(self, other);
3158}
3159
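// Hedged note on `floor_divide`: current releases round toward negative
// infinity (true floor), but older PyTorch releases truncated toward zero
// under this name, so check the documentation for the version in use.
// Illustrative sketch:
//
//   at::Tensor a = at::tensor({7, -7});
//   at::Tensor q = at::floor_divide(a, at::tensor({2, 2}));  // {3, -4} with floor rounding
//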
3160// aten::frac(Tensor self) -> Tensor
3161inline at::Tensor frac(const at::Tensor & self) {
3162 return at::_ops::frac::call(self);
3163}
3164
3165// aten::frac_(Tensor(a!) self) -> Tensor(a!)
3166inline at::Tensor & frac_(at::Tensor & self) {
3167 return at::_ops::frac_::call(self);
3168}
3169
3170// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
3171inline at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self) {
3172 return at::_ops::frac_out::call(self, out);
3173}
3174// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
3175inline at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out) {
3176 return at::_ops::frac_out::call(self, out);
3177}
3178
3179// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3180inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
3181 return at::_ops::full_names::call(size, fill_value, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3182}
3183// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3184inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3185 return at::_ops::full_names::call(size, fill_value, names, dtype, layout, device, pin_memory);
3186}
3187
3188// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3189inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
3190 return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3191}
3192namespace symint {
3193 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3194 at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
3195 return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3196 }
3197}
3198
3199// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3200inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3201 return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
3202}
3203namespace symint {
3204 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3205 at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3206 return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
3207 }
3208}
3209
3210// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3211inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
3212 return at::_ops::full::call(size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3213}
3214namespace symint {
3215 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3216 at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
3217 return at::_ops::full::call(size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3218 }
3219}
3220
3221// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3222inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3223 return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
3224}
3225namespace symint {
3226 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3227 at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3228 return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
3229 }
3230}
3231
3232// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
3233inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
3234 return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
3235}
3236namespace symint {
3237 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3238 at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
3239 return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
3240 }
3241}
3242
3243// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
3244inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
3245 return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
3246}
3247namespace symint {
3248 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3249 at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
3250 return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
3251 }
3252}
3253
3254// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
3255inline at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
3256 return at::_ops::full_out::call(size, fill_value, out);
3257}
3258namespace symint {
3259 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3260 at::Tensor & full_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
3261 return at::_ops::full_out::call(size, fill_value, out);
3262 }
3263}
3264
3265// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
3266inline at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
3267 return at::_ops::full_out::call(size, fill_value, out);
3268}
3269namespace symint {
3270 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3271 at::Tensor & full_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
3272 return at::_ops::full_out::call(size, fill_value, out);
3273 }
3274}
3275
3276// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
3277inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
3278 return at::_ops::full_like::call(self, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
3279}
3280// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
3281inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
3282 return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
3283}
3284
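// Hedged `full` / `full_like` sketch: `full` builds a tensor of the given
// shape filled with one scalar; `full_like` takes shape (and, by default,
// dtype/layout/device) from an existing tensor:
//
//   at::Tensor f = at::full({2, 2}, 3.5);   // 2x2 filled with 3.5
//   at::Tensor g = at::full_like(f, 0.0);   // same shape/options, filled with 0
//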
3285// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3286inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0, at::TensorOptions options={}) {
3287 return at::_ops::from_file::call(filename, shared, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3288}
3289// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3290inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3291 return at::_ops::from_file::call(filename, shared, size, dtype, layout, device, pin_memory);
3292}
3293
3294// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3295inline at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
3296 return at::_ops::gcd_out::call(self, other, out);
3297}
3298// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3299inline at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3300 return at::_ops::gcd_out::call(self, other, out);
3301}
3302
3303// aten::gcd(Tensor self, Tensor other) -> Tensor
3304inline at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) {
3305 return at::_ops::gcd::call(self, other);
3306}
3307
3308// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
3309inline at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) {
3310 return at::_ops::gcd_::call(self, other);
3311}
3312
3313// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3314inline at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
3315 return at::_ops::lcm_out::call(self, other, out);
3316}
3317// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3318inline at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3319 return at::_ops::lcm_out::call(self, other, out);
3320}
3321
3322// aten::lcm(Tensor self, Tensor other) -> Tensor
3323inline at::Tensor lcm(const at::Tensor & self, const at::Tensor & other) {
3324 return at::_ops::lcm::call(self, other);
3325}
3326
3327// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
3328inline at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other) {
3329 return at::_ops::lcm_::call(self, other);
3330}
3331
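// Hedged gcd/lcm sketch; both operate elementwise on integral tensors, with
// the usual in-place (`_`) and out variants:
//
//   at::Tensor a = at::tensor({4, 6});
//   at::Tensor b = at::tensor({6, 4});
//   at::Tensor g = at::gcd(a, b);  // {2, 2}
//   at::Tensor l = at::lcm(a, b);  // {12, 12}
//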
3332// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
3333inline at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
3334 return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners);
3335}
3336
3337// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
3338inline at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
3339 return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners);
3340}
3341
3342// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
3343inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
3344 return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
3345}
3346
3347// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
3348inline at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
3349 return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners);
3350}
3351
3352// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
3353inline ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
3354 return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
3355}
3356
3357// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
3358inline at::Tensor grid_sampler_3d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
3359 return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners);
3360}
3361
3362// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
3363inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
3364 return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
3365}
3366
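// Hedged note on the integer enums in the grid_sampler family: in current
// PyTorch, interpolation_mode is 0=bilinear, 1=nearest, 2=bicubic and
// padding_mode is 0=zeros, 1=border, 2=reflection; treat these numbers as an
// assumption and confirm against the GridSampler headers in ATen/native for
// the release in use. Sketch (input NCHW, grid NHW2 with values in [-1, 1]):
//
//   at::Tensor out = at::grid_sampler(input, grid, /*interpolation_mode=*/0,
//                                     /*padding_mode=*/0, /*align_corners=*/false);
//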
3367// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3368inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options={}) {
3369 return at::_ops::hann_window::call(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3370}
3371// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3372inline at::Tensor hann_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3373 return at::_ops::hann_window::call(window_length, dtype, layout, device, pin_memory);
3374}
3375
3376// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3377inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
3378 return at::_ops::hann_window_periodic::call(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3379}
3380// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3381inline at::Tensor hann_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3382 return at::_ops::hann_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
3383}
3384
3385// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3386inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={}) {
3387 return at::_ops::hamming_window::call(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3388}
3389// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3390inline at::Tensor hamming_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3391 return at::_ops::hamming_window::call(window_length, dtype, layout, device, pin_memory);
3392}
3393
3394// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3395inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
3396 return at::_ops::hamming_window_periodic::call(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3397}
3398// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3399inline at::Tensor hamming_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3400 return at::_ops::hamming_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
3401}
3402
3403// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3404inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}) {
3405 return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3406}
3407// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3408inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3409 return at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, dtype, layout, device, pin_memory);
3410}
3411
3412// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3413inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}) {
3414 return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3415}
3416// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3417inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3418 return at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
3419}
3420
3421// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3422inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}) {
3423 return at::_ops::kaiser_window::call(window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3424}
3425// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3426inline at::Tensor kaiser_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3427 return at::_ops::kaiser_window::call(window_length, dtype, layout, device, pin_memory);
3428}
3429
3430// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3431inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
3432 return at::_ops::kaiser_window_periodic::call(window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3433}
3434// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3435inline at::Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3436 return at::_ops::kaiser_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
3437}
3438
3439// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3440inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) {
3441 return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
3442}
3443// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
3444inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
3445 return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, dtype, layout, device, pin_memory);
3446}
3447
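// NOTE: [Example: kaiser_window] Like the other window factories above,
// kaiser_window comes in a TensorOptions overload and an unpacked
// dtype/layout/device/pin_memory overload. A minimal usage sketch
// (illustrative, not generated code; assumes <ATen/ATen.h> is included and
// the snippet runs inside a function body):
//
//   // 128-point periodic Kaiser window with beta=12.0, as float32 on CPU
//   at::Tensor w = at::kaiser_window(128, /*periodic=*/true, /*beta=*/12.0,
//                                    at::TensorOptions().dtype(at::kFloat));
//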
3448// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
3449inline at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean) {
3450 return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction);
3451}
3452
3453// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
3454inline at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enabled=true) {
3455 return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled);
3456}
3457
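// NOTE: [Example: group_norm] A minimal sketch (illustrative, not generated
// code; assumes <ATen/ATen.h>). weight and bias are optional and default to
// undefined tensors:
//
//   at::Tensor x = at::randn({8, 32, 16, 16});          // (N, C, H, W)
//   at::Tensor y = at::group_norm(x, /*num_groups=*/8); // C=32 split into 8 groups
//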
3458// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
3459inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
3460 return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
3461}
3462namespace symint {
3463 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3464 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
3465 return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
3466 }
3467}
3468
3469// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
3470inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_symint(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
3471 return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
3472}
3473namespace symint {
3474 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3475 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
3476 return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
3477 }
3478}
3479
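// NOTE: [Example: at::symint wrappers] The at::symint:: templates above let
// generic code pick the concrete-int or SymInt overload with a single
// template argument instead of spelling out the *_symint name. A sketch
// (illustrative, not generated code; assumes <ATen/ATen.h> and a 3-d input
// of shape (N, C, HxW)):
//
//   at::Tensor x = at::randn({4, 6, 25});
//   auto outs = at::symint::native_group_norm<int64_t>(
//       x, /*weight=*/{}, /*bias=*/{}, /*N=*/4, /*C=*/6, /*HxW=*/25,
//       /*group=*/3, /*eps=*/1e-5);
//   at::Tensor y = std::get<0>(outs);  // (normalized, mean, rstd)
//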
3480// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
3481inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
3482 return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
3483}
3484namespace symint {
3485 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3486 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
3487 return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
3488 }
3489}
3490
3491// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
3492inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
3493 return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
3494}
3495namespace symint {
3496 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3497 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
3498 return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
3499 }
3500}
3501
3502// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
3503inline at::Tensor _fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
3504 return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
3505}
3506
3507// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
3508inline at::Tensor & _fft_r2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
3509 return at::_ops::_fft_r2c_out::call(self, dim, normalization, onesided, out);
3510}
3511// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
3512inline at::Tensor & _fft_r2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
3513 return at::_ops::_fft_r2c_out::call(self, dim, normalization, onesided, out);
3514}
3515
3516// aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
3517inline at::Tensor _fft_c2r(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
3518 return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size);
3519}
3520
3521// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
3522inline at::Tensor & _fft_c2r_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
3523 return at::_ops::_fft_c2r_out::call(self, dim, normalization, last_dim_size, out);
3524}
3525// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
3526inline at::Tensor & _fft_c2r_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {
3527 return at::_ops::_fft_c2r_out::call(self, dim, normalization, last_dim_size, out);
3528}
3529
3530// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
3531inline at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
3532 return at::_ops::_fft_c2c::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward);
3533}
3534namespace symint {
3535 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3536 at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
3537 return at::_ops::_fft_c2c::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward);
3538 }
3539}
3540
3541// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
3542inline at::Tensor _fft_c2c_symint(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
3543 return at::_ops::_fft_c2c::call(self, dim, normalization, forward);
3544}
3545namespace symint {
3546 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3547 at::Tensor _fft_c2c(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
3548 return at::_ops::_fft_c2c::call(self, dim, normalization, forward);
3549 }
3550}
3551
3552// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
3553inline at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
3554 return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
3555}
3556namespace symint {
3557 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3558 at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
3559 return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
3560 }
3561}
3562
3563// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
3564inline at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
3565 return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
3566}
3567namespace symint {
3568 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3569 at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
3570 return at::_ops::_fft_c2c_out::call(self, c10::fromIntArrayRefSlow(dim), normalization, forward, out);
3571 }
3572}
3573
3574// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
3575inline at::Tensor & _fft_c2c_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
3576 return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out);
3577}
3578namespace symint {
3579 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3580 at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
3581 return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out);
3582 }
3583}
3584
3585// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
3586inline at::Tensor & _fft_c2c_symint_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
3587 return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out);
3588}
3589namespace symint {
3590 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3591 at::Tensor & _fft_c2c_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
3592 return at::_ops::_fft_c2c_out::call(self, dim, normalization, forward, out);
3593 }
3594}
3595
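// NOTE: [Example: _fft_* primitives] The _fft_r2c/_fft_c2r/_fft_c2c entries
// above are the internal primitives that the public torch.fft API lowers to.
// A hedged sketch (illustrative, not generated code; assumes <ATen/ATen.h>;
// the normalization argument is an internal integer code, taken here to be
// 0 = no scaling):
//
//   at::Tensor x = at::randn({64});
//   at::Tensor X = at::_fft_r2c(x, /*dim=*/{0}, /*normalization=*/0,
//                               /*onesided=*/true);  // 33 complex bins
//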
3596// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
3597inline void _validate_compressed_sparse_indices(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
3598 return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
3599}
3600
3601// aten::_cufft_get_plan_cache_size(int device_index) -> int
3602inline int64_t _cufft_get_plan_cache_size(int64_t device_index) {
3603 return at::_ops::_cufft_get_plan_cache_size::call(device_index);
3604}
3605
3606// aten::_cufft_get_plan_cache_max_size(int device_index) -> int
3607inline int64_t _cufft_get_plan_cache_max_size(int64_t device_index) {
3608 return at::_ops::_cufft_get_plan_cache_max_size::call(device_index);
3609}
3610
3611// aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
3612inline void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size) {
3613 return at::_ops::_cufft_set_plan_cache_max_size::call(device_index, max_size);
3614}
3615
3616// aten::_cufft_clear_plan_cache(int device_index) -> ()
3617inline void _cufft_clear_plan_cache(int64_t device_index) {
3618 return at::_ops::_cufft_clear_plan_cache::call(device_index);
3619}
3620
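// NOTE: [Example: cuFFT plan cache] The _cufft_* helpers above manage the
// per-device cuFFT plan cache, so they are only meaningful on CUDA builds.
// A sketch using only the functions declared above (illustrative, not
// generated code):
//
//   int64_t used = at::_cufft_get_plan_cache_size(/*device_index=*/0);
//   int64_t cap  = at::_cufft_get_plan_cache_max_size(0);
//   at::_cufft_set_plan_cache_max_size(0, /*max_size=*/16);
//   at::_cufft_clear_plan_cache(0);
//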
3621// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
3622inline at::Tensor index(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
3623 return at::_ops::index_Tensor::call(self, indices);
3624}
3625
3626// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
3627inline at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
3628 return at::_ops::index_Tensor_out::call(self, indices, out);
3629}
3630// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
3631inline at::Tensor & index_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) {
3632 return at::_ops::index_Tensor_out::call(self, indices, out);
3633}
3634
3635// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
3636inline at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
3637 return at::_ops::index_copy_out::call(self, dim, index, source, out);
3638}
3639// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
3640inline at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
3641 return at::_ops::index_copy_out::call(self, dim, index, source, out);
3642}
3643
3644// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
3645inline at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
3646 return at::_ops::index_copy::call(self, dim, index, source);
3647}
3648
3649// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
3650inline at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
3651 return at::_ops::index_copy_dimname::call(self, dim, index, source);
3652}
3653
3654// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
3655inline at::Tensor & index_put_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
3656 return at::_ops::index_put_::call(self, indices, values, accumulate);
3657}
3658
3659// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
3660inline at::Tensor index_put(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
3661 return at::_ops::index_put::call(self, indices, values, accumulate);
3662}
3663
3664// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
3665inline at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
3666 return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
3667}
3668
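// NOTE: [Example: advanced indexing] The c10::List<c10::optional<at::Tensor>>
// indices argument used by index/index_put above encodes advanced indexing;
// a c10::nullopt entry leaves that dimension untouched, like `:` in Python.
// A sketch (illustrative, not generated code; assumes <ATen/ATen.h>):
//
//   at::Tensor t = at::zeros({4, 5});
//   at::Tensor rows = at::arange(0, 4, 2);           // [0, 2], kLong
//   c10::List<c10::optional<at::Tensor>> idx;
//   idx.push_back(rows);                             // select rows 0 and 2
//   idx.push_back(c10::nullopt);                     // keep dim 1 whole
//   at::Tensor sel = at::index(t, idx);              // t[rows, :], shape {2, 5}
//   at::index_put_(t, idx, at::ones({2, 5}));        // t[rows, :] = 1
//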
3669// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
3670inline at::Tensor instance_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
3671 return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
3672}
3673
3674// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
3675inline at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) {
3676 return at::_ops::isclose::call(self, other, rtol, atol, equal_nan);
3677}
3678
3679// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
3680inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
3681 return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out);
3682}
3683// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
3684inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
3685 return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out);
3686}
3687
3688// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
3689inline at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
3690 return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert);
3691}
3692
3693// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
3694inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) {
3695 return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out);
3696}
3697// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
3698inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
3699 return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out);
3700}
3701
3702// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
3703inline at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) {
3704 return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert);
3705}
3706
3707// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
3708inline at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
3709 return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out);
3710}
3711// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
3712inline at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
3713 return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out);
3714}
3715
3716// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
3717inline at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
3718 return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert);
3719}
3720
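// NOTE: [Example: isin] isin computes a boolean membership mask and is
// overloaded for Tensor/Scalar on either side, as listed above. A sketch
// (illustrative, not generated code; assumes <ATen/ATen.h>):
//
//   at::Tensor elems = at::arange(5);                // [0, 1, 2, 3, 4]
//   at::Tensor test  = at::arange(1, 5, 2);          // [1, 3]
//   at::Tensor mask  = at::isin(elems, test);        // [F, T, F, T, F]
//   at::Tensor inv   = at::isin(elems, test,
//                               /*assume_unique=*/false, /*invert=*/true);
//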
3721// aten::isnan(Tensor self) -> Tensor
3722inline at::Tensor isnan(const at::Tensor & self) {
3723 return at::_ops::isnan::call(self);
3724}
3725
3726// aten::is_distributed(Tensor self) -> bool
3727inline bool is_distributed(const at::Tensor & self) {
3728 return at::_ops::is_distributed::call(self);
3729}
3730
3731// aten::is_floating_point(Tensor self) -> bool
3732inline bool __dispatch_is_floating_point(const at::Tensor & self) {
3733 return at::_ops::is_floating_point::call(self);
3734}
3735
3736// aten::is_complex(Tensor self) -> bool
3737inline bool __dispatch_is_complex(const at::Tensor & self) {
3738 return at::_ops::is_complex::call(self);
3739}
3740
3741// aten::is_conj(Tensor self) -> bool
3742inline bool __dispatch_is_conj(const at::Tensor & self) {
3743 return at::_ops::is_conj::call(self);
3744}
3745
3746// aten::_is_zerotensor(Tensor self) -> bool
3747inline bool __dispatch__is_zerotensor(const at::Tensor & self) {
3748 return at::_ops::_is_zerotensor::call(self);
3749}
3750
3751// aten::is_neg(Tensor self) -> bool
3752inline bool __dispatch_is_neg(const at::Tensor & self) {
3753 return at::_ops::is_neg::call(self);
3754}
3755
3756// aten::isreal(Tensor self) -> Tensor
3757inline at::Tensor isreal(const at::Tensor & self) {
3758 return at::_ops::isreal::call(self);
3759}
3760
3761// aten::is_nonzero(Tensor self) -> bool
3762inline bool is_nonzero(const at::Tensor & self) {
3763 return at::_ops::is_nonzero::call(self);
3764}
3765
3766// aten::is_same_size(Tensor self, Tensor other) -> bool
3767inline bool is_same_size(const at::Tensor & self, const at::Tensor & other) {
3768 return at::_ops::is_same_size::call(self, other);
3769}
3770
3771// aten::is_signed(Tensor self) -> bool
3772inline bool __dispatch_is_signed(const at::Tensor & self) {
3773 return at::_ops::is_signed::call(self);
3774}
3775
3776// aten::is_inference(Tensor self) -> bool
3777inline bool __dispatch_is_inference(const at::Tensor & self) {
3778 return at::_ops::is_inference::call(self);
3779}
3780
3781// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
3782inline at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false) {
3783 return at::_ops::kl_div::call(self, target, reduction, log_target);
3784}
3785
3786// aten::kron(Tensor self, Tensor other) -> Tensor
3787inline at::Tensor kron(const at::Tensor & self, const at::Tensor & other) {
3788 return at::_ops::kron::call(self, other);
3789}
3790
3791// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3792inline at::Tensor & kron_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
3793 return at::_ops::kron_out::call(self, other, out);
3794}
3795// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
3796inline at::Tensor & kron_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
3797 return at::_ops::kron_out::call(self, other, out);
3798}
3799
3800// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
3801inline ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
3802 return at::_ops::kthvalue::call(self, k, dim, keepdim);
3803}
3804
3805// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
3806inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
3807 return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
3808}
3809// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
3810inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
3811 return at::_ops::kthvalue_values::call(self, k, dim, keepdim, values, indices);
3812}
3813
3814// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
3815inline ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
3816 return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
3817}
3818
3819// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
3820inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
3821 return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
3822}
3823// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
3824inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
3825 return at::_ops::kthvalue_dimname_out::call(self, k, dim, keepdim, values, indices);
3826}
3827
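// NOTE: [Example: kthvalue] kthvalue returns the k-th smallest entry along a
// dimension (k is 1-based) together with its index. A sketch (illustrative,
// not generated code; assumes <ATen/ATen.h>):
//
//   at::Tensor x = at::randn({3, 7});
//   auto vi = at::kthvalue(x, /*k=*/2, /*dim=*/1);   // 2nd smallest per row
//   at::Tensor values  = std::get<0>(vi);            // shape {3}
//   at::Tensor indices = std::get<1>(vi);
//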
3828// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
3829inline at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
3830 return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
3831}
3832namespace symint {
3833 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3834 at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
3835 return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
3836 }
3837}
3838
3839// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
3840inline at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
3841 return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
3842}
3843namespace symint {
3844 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3845 at::Tensor layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
3846 return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
3847 }
3848}
3849
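// NOTE: [Example: layer_norm] normalized_shape names the trailing dimensions
// that are normalized over; weight and bias, when given, must have exactly
// that shape. A sketch (illustrative, not generated code; assumes
// <ATen/ATen.h>):
//
//   at::Tensor x = at::randn({16, 128});
//   at::Tensor y = at::layer_norm(x, /*normalized_shape=*/{128});
//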
3850// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
3851inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
3852 return at::_ops::native_layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps);
3853}
3854namespace symint {
3855 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3856 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
3857 return at::_ops::native_layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps);
3858 }
3859}
3860
3861// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
3862inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
3863 return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps);
3864}
3865namespace symint {
3866 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3867 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
3868 return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps);
3869 }
3870}
3871
3872// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
3873inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
3874 return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
3875}
3876namespace symint {
3877 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
3878 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
3879 return at::_ops::native_layer_norm_backward::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask);
3880 }
3881}
3882
3883// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
3884inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
3885 return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
3886}
3887namespace symint {
3888 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
3889 ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
3890 return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
3891 }
3892}
3893
3894// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
3895inline at::Tensor nan_to_num(const at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) {
3896 return at::_ops::nan_to_num::call(self, nan, posinf, neginf);
3897}
3898
3899// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
3900inline at::Tensor & nan_to_num_(at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) {
3901 return at::_ops::nan_to_num_::call(self, nan, posinf, neginf);
3902}
3903
3904// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
3905inline at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) {
3906 return at::_ops::nan_to_num_out::call(self, nan, posinf, neginf, out);
3907}
3908// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
3909inline at::Tensor & nan_to_num_outf(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
3910 return at::_ops::nan_to_num_out::call(self, nan, posinf, neginf, out);
3911}
3912
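// NOTE: [Example: nan_to_num] Each optional left as c10::nullopt keeps the
// default replacement: NaN becomes 0 and +/-infinity saturate to the dtype's
// largest finite values. A sketch (illustrative, not generated code; assumes
// <ATen/ATen.h> and <cmath> for NAN/INFINITY):
//
//   at::Tensor x = at::tensor({1.0, NAN, INFINITY, -INFINITY});
//   at::Tensor y = at::nan_to_num(x);                      // defaults
//   at::Tensor z = at::nan_to_num(x, /*nan=*/0.0,
//                                 /*posinf=*/1e6, /*neginf=*/-1e6);
//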
3913// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
3914inline at::Tensor linear(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
3915 return at::_ops::linear::call(input, weight, bias);
3916}
3917
3918// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
3919inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
3920 return at::_ops::linear_backward::call(self, grad_output, weight, output_mask);
3921}
3922
3923// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
3924inline at::Tensor & linear_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
3925 return at::_ops::linear_out::call(input, weight, bias, out);
3926}
3927// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
3928inline at::Tensor & linear_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
3929 return at::_ops::linear_out::call(input, weight, bias, out);
3930}
3931
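// NOTE: [Example: linear] at::linear computes y = x.matmul(W.t()) + b, so
// weight is laid out as (out_features, in_features). A sketch (illustrative,
// not generated code; assumes <ATen/ATen.h>):
//
//   at::Tensor x = at::randn({32, 64});    // (batch, in_features)
//   at::Tensor W = at::randn({128, 64});   // (out_features, in_features)
//   at::Tensor b = at::randn({128});
//   at::Tensor y = at::linear(x, W, b);    // shape {32, 128}
//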
3932// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
3933inline at::Tensor mkldnn_linear(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
3934 return at::_ops::mkldnn_linear::call(self, weight, bias);
3935}
3936
3937// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
3938inline at::Tensor mkldnn_linear_backward_input(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
3939 return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight);
3940}
3941
3942// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
3943inline ::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
3944 return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined);
3945}
3946
3947// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
3948inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
3949 return at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask);
3950}
3951
3952// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
3953inline at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
3954 return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
3955}
3956
3957// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
3958inline at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
3959 return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
3960}
3961
3962// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
3963inline ::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight(const at::Tensor & input) {
3964 return at::_ops::fbgemm_linear_quantize_weight::call(input);
3965}
3966
3967// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
3968inline at::Tensor fbgemm_pack_gemm_matrix_fp16(const at::Tensor & input) {
3969 return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input);
3970}
3971
3972// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
3973inline at::Tensor fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
3974 return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias);
3975}
3976
3977// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
3978inline at::Tensor fbgemm_linear_fp16_weight(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
3979 return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias);
3980}
3981
3982// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor
3983inline at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input) {
3984 return at::_ops::fbgemm_pack_quantized_matrix::call(input);
3985}
3986
3987// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor
3988inline at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N) {
3989 return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N);
3990}
3991
3992// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
3993inline at::Tensor ldexp(const at::Tensor & self, const at::Tensor & other) {
3994 return at::_ops::ldexp_Tensor::call(self, other);
3995}
3996
3997// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
3998inline at::Tensor & ldexp_(at::Tensor & self, const at::Tensor & other) {
3999 return at::_ops::ldexp_::call(self, other);
4000}
4001
4002// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4003inline at::Tensor & ldexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
4004 return at::_ops::ldexp_out::call(self, other, out);
4005}
4006// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4007inline at::Tensor & ldexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
4008 return at::_ops::ldexp_out::call(self, other, out);
4009}
4010
4011// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
4012inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) {
4013 return at::_ops::linspace::call(start, end, steps, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
4014}
4015// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
4016inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
4017 return at::_ops::linspace::call(start, end, steps, dtype, layout, device, pin_memory);
4018}
4019
4020// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
4021inline at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) {
4022 return at::_ops::linspace_out::call(start, end, steps, out);
4023}
4024// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
4025inline at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
4026 return at::_ops::linspace_out::call(start, end, steps, out);
4027}
4028
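// NOTE: [Example: linspace] linspace takes its endpoints as Scalars and
// always includes both of them. A sketch (illustrative, not generated code;
// assumes <ATen/ATen.h>):
//
//   at::Tensor t = at::linspace(0.0, 1.0, /*steps=*/5);
//   // [0.00, 0.25, 0.50, 0.75, 1.00]
//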
4029// aten::log(Tensor self) -> Tensor
4030inline at::Tensor log(const at::Tensor & self) {
4031 return at::_ops::log::call(self);
4032}
4033
4034// aten::log_(Tensor(a!) self) -> Tensor(a!)
4035inline at::Tensor & log_(at::Tensor & self) {
4036 return at::_ops::log_::call(self);
4037}
4038
4039// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4040inline at::Tensor & log_out(at::Tensor & out, const at::Tensor & self) {
4041 return at::_ops::log_out::call(self, out);
4042}
4043// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4044inline at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out) {
4045 return at::_ops::log_out::call(self, out);
4046}
4047
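// NOTE: [Example: *_out vs *_outf] Every out-variant in this header is
// generated twice: *_out takes the destination first (with trailing defaults
// still usable), while *_outf keeps the schema order with `out` last and no
// defaults. Both dispatch to the same op. A sketch (illustrative, not
// generated code; assumes <ATen/ATen.h>):
//
//   at::Tensor x = at::rand({4}) + 0.1;   // keep inputs positive for log
//   at::Tensor out = at::empty_like(x);
//   at::log_out(out, x);                  // destination-first form
//   at::log_outf(x, out);                 // schema-order ("functional") form
//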
4048// aten::log10(Tensor self) -> Tensor
4049inline at::Tensor log10(const at::Tensor & self) {
4050 return at::_ops::log10::call(self);
4051}
4052
4053// aten::log10_(Tensor(a!) self) -> Tensor(a!)
4054inline at::Tensor & log10_(at::Tensor & self) {
4055 return at::_ops::log10_::call(self);
4056}
4057
4058// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4059inline at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self) {
4060 return at::_ops::log10_out::call(self, out);
4061}
4062// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4063inline at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out) {
4064 return at::_ops::log10_out::call(self, out);
4065}
4066
4067// aten::log1p(Tensor self) -> Tensor
4068inline at::Tensor log1p(const at::Tensor & self) {
4069 return at::_ops::log1p::call(self);
4070}
4071
4072// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
4073inline at::Tensor & log1p_(at::Tensor & self) {
4074 return at::_ops::log1p_::call(self);
4075}
4076
4077// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4078inline at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self) {
4079 return at::_ops::log1p_out::call(self, out);
4080}
4081// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4082inline at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out) {
4083 return at::_ops::log1p_out::call(self, out);
4084}
4085
4086// aten::log2(Tensor self) -> Tensor
4087inline at::Tensor log2(const at::Tensor & self) {
4088 return at::_ops::log2::call(self);
4089}
4090
4091// aten::log2_(Tensor(a!) self) -> Tensor(a!)
4092inline at::Tensor & log2_(at::Tensor & self) {
4093 return at::_ops::log2_::call(self);
4094}
4095
4096// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4097inline at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self) {
4098 return at::_ops::log2_out::call(self, out);
4099}
4100// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
4101inline at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out) {
4102 return at::_ops::log2_out::call(self, out);
4103}
4104
4105// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4106inline at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
4107 return at::_ops::logaddexp_out::call(self, other, out);
4108}
4109// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4110inline at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
4111 return at::_ops::logaddexp_out::call(self, other, out);
4112}
4113
4114// aten::logaddexp(Tensor self, Tensor other) -> Tensor
4115inline at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other) {
4116 return at::_ops::logaddexp::call(self, other);
4117}
4118
4119// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4120inline at::Tensor & logaddexp2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
4121 return at::_ops::logaddexp2_out::call(self, other, out);
4122}
4123// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4124inline at::Tensor & logaddexp2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
4125 return at::_ops::logaddexp2_out::call(self, other, out);
4126}
4127
4128// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
4129inline at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other) {
4130 return at::_ops::logaddexp2::call(self, other);
4131}
4132
4133// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
4134inline at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other) {
4135 return at::_ops::xlogy_Tensor::call(self, other);
4136}
4137
4138// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
4139inline at::Tensor xlogy(const at::Scalar & self, const at::Tensor & other) {
4140 return at::_ops::xlogy_Scalar_Self::call(self, other);
4141}
4142
4143// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
4144inline at::Tensor xlogy(const at::Tensor & self, const at::Scalar & other) {
4145 return at::_ops::xlogy_Scalar_Other::call(self, other);
4146}
4147
4148// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
4149inline at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other) {
4150 return at::_ops::xlogy__Tensor::call(self, other);
4151}
4152
4153// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
4154inline at::Tensor & xlogy_(at::Tensor & self, const at::Scalar & other) {
4155 return at::_ops::xlogy__Scalar_Other::call(self, other);
4156}
4157
4158// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4159inline at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
4160 return at::_ops::xlogy_OutTensor::call(self, other, out);
4161}
4162// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4163inline at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
4164 return at::_ops::xlogy_OutTensor::call(self, other, out);
4165}
4166
4167// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4168inline at::Tensor & xlogy_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
4169 return at::_ops::xlogy_OutScalar_Self::call(self, other, out);
4170}
4171// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
4172inline at::Tensor & xlogy_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
4173 return at::_ops::xlogy_OutScalar_Self::call(self, other, out);
4174}
4175
4176// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
4177inline at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
4178 return at::_ops::xlogy_OutScalar_Other::call(self, other, out);
4179}
4180// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
4181inline at::Tensor & xlogy_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
4182 return at::_ops::xlogy_OutScalar_Other::call(self, other, out);
4183}
4184
4185// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
4186inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}) {
4187 return at::_ops::logspace::call(start, end, steps, base, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
4188}
4189// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
4190inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
4191 return at::_ops::logspace::call(start, end, steps, base, dtype, layout, device, pin_memory);
4192}
4193
4194// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
4195inline at::Tensor & logspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0) {
4196 return at::_ops::logspace_out::call(start, end, steps, base, out);
4197}
4198// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
4199inline at::Tensor & logspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
4200 return at::_ops::logspace_out::call(start, end, steps, base, out);
4201}
4202
4203// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
4204inline at::Tensor log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
4205 return at::_ops::log_softmax_int::call(self, dim, dtype);
4206}
4207
4208// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
4209inline at::Tensor & log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
4210 return at::_ops::log_softmax_int_out::call(self, dim, dtype, out);
4211}
4212// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
4213inline at::Tensor & log_softmax_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
4214 return at::_ops::log_softmax_int_out::call(self, dim, dtype, out);
4215}
4216
4217// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
4218inline at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
4219 return at::_ops::log_softmax_Dimname::call(self, dim, dtype);
4220}
4221
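// NOTE: [Example: log_softmax] log_softmax is the numerically stable
// composition of softmax and log; dtype, when given, casts the input before
// the computation. A sketch (illustrative, not generated code; assumes
// <ATen/ATen.h>):
//
//   at::Tensor logits = at::randn({8, 10});
//   at::Tensor logp = at::log_softmax(logits, /*dim=*/1);
//   // logp.exp().sum(1) is a vector of ones
//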
4222// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
4223inline at::Tensor _log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
4224 return at::_ops::_log_softmax::call(self, dim, half_to_float);
4225}
4226
4227// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
4228inline at::Tensor & _log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
4229 return at::_ops::_log_softmax_out::call(self, dim, half_to_float, out);
4230}
4231// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
4232inline at::Tensor & _log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
4233 return at::_ops::_log_softmax_out::call(self, dim, half_to_float, out);
4234}
4235
4236// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
4237inline at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
4238 return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
4239}
4240
4241// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
4242inline at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
4243 return at::_ops::_log_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, out);
4244}
4245// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
4246inline at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
4247 return at::_ops::_log_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, out);
4248}
4249
4250// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
4251inline at::Tensor _logcumsumexp(const at::Tensor & self, int64_t dim) {
4252 return at::_ops::_logcumsumexp::call(self, dim);
4253}
4254
4255// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
4256inline at::Tensor & _logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
4257 return at::_ops::_logcumsumexp_out::call(self, dim, out);
4258}
4259// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
4260inline at::Tensor & _logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
4261 return at::_ops::_logcumsumexp_out::call(self, dim, out);
4262}
4263
4264// aten::logcumsumexp(Tensor self, int dim) -> Tensor
4265inline at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim) {
4266 return at::_ops::logcumsumexp::call(self, dim);
4267}
4268
4269// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
4270inline at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
4271 return at::_ops::logcumsumexp_out::call(self, dim, out);
4272}
4273// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
4274inline at::Tensor & logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
4275 return at::_ops::logcumsumexp_out::call(self, dim, out);
4276}
4277
4278// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
4279inline at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim) {
4280 return at::_ops::logcumsumexp_dimname::call(self, dim);
4281}
4282
4283// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
4284inline at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim) {
4285 return at::_ops::logcumsumexp_dimname_out::call(self, dim, out);
4286}
4287// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
4288inline at::Tensor & logcumsumexp_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
4289 return at::_ops::logcumsumexp_dimname_out::call(self, dim, out);
4290}
4291
// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
inline at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
  return at::_ops::logsumexp::call(self, dim, keepdim);
}

// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
  return at::_ops::logsumexp_out::call(self, dim, keepdim, out);
}
// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
  return at::_ops::logsumexp_out::call(self, dim, keepdim, out);
}

// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
inline at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
  return at::_ops::logsumexp_names::call(self, dim, keepdim);
}

// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
  return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out);
}
// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
  return at::_ops::logsumexp_names_out::call(self, dim, keepdim, out);
}

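// Example usage (illustrative sketch): logsumexp reduces the listed
// dimensions in a numerically stable way.
//
//   at::Tensor x = at::randn({2, 5});
//   at::Tensor r = at::logsumexp(x, /*dim=*/{1});                   // shape [2]
//   at::Tensor k = at::logsumexp(x, /*dim=*/{1}, /*keepdim=*/true); // shape [2, 1]
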
// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
inline at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean) {
  return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction);
}

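// Example usage (illustrative sketch): target holds +1 where input1 should
// rank above input2 and -1 otherwise.
//
//   at::Tensor a = at::randn({8});
//   at::Tensor b = at::randn({8});
//   at::Tensor t = at::ones({8});
//   at::Tensor loss = at::margin_ranking_loss(a, b, t, /*margin=*/0.5);
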
// aten::matmul(Tensor self, Tensor other) -> Tensor
inline at::Tensor matmul(const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::matmul::call(self, other);
}

// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> matmul_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
  return at::_ops::matmul_backward::call(grad, self, other, mask);
}

// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::matmul_out::call(self, other, out);
}
// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return at::_ops::matmul_out::call(self, other, out);
}

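// Example usage (illustrative sketch): matmul broadcasts batch dimensions,
// unlike the strictly 2-D at::mm further below.
//
//   at::Tensor a = at::randn({10, 3, 4});
//   at::Tensor b = at::randn({4, 5});
//   at::Tensor c = at::matmul(a, b);   // shape [10, 3, 5]
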
// aten::matrix_power(Tensor self, int n) -> Tensor
inline at::Tensor matrix_power(const at::Tensor & self, int64_t n) {
  return at::_ops::matrix_power::call(self, n);
}

// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) {
  return at::_ops::matrix_power_out::call(self, n, out);
}
// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) {
  return at::_ops::matrix_power_out::call(self, n, out);
}

// aten::matrix_exp(Tensor self) -> Tensor
inline at::Tensor matrix_exp(const at::Tensor & self) {
  return at::_ops::matrix_exp::call(self);
}

// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
inline at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) {
  return at::_ops::matrix_exp_backward::call(self, grad);
}

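// Example usage (illustrative sketch): both operators expect square
// matrices (or batches of them); matrix_power(m, 0) yields the identity.
//
//   at::Tensor m = at::randn({3, 3});
//   at::Tensor m3 = at::matrix_power(m, 3);   // m @ m @ m
//   at::Tensor e = at::matrix_exp(m);         // matrix exponential
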
// aten::_aminmax(Tensor self) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self) {
  return at::_ops::_aminmax::call(self);
}

// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::_aminmax_dim::call(self, dim, keepdim);
}

// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
inline ::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
  return at::_ops::aminmax::call(self, dim, keepdim);
}

// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
  return at::_ops::aminmax_out::call(self, dim, keepdim, min, max);
}
// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
  return at::_ops::aminmax_out::call(self, dim, keepdim, min, max);
}

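// Example usage (illustrative sketch): aminmax computes the minimum and
// maximum in one pass; with dim unset it reduces the whole tensor.
//
//   at::Tensor x = at::randn({4, 6});
//   auto [mn, mx] = at::aminmax(x);              // 0-dim tensors
//   auto [mns, mxs] = at::aminmax(x, /*dim=*/1); // per-row, each shape [4]
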
// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
inline at::Tensor _compute_linear_combination(const at::Tensor & input, const at::Tensor & coefficients) {
  return at::_ops::_compute_linear_combination::call(input, coefficients);
}

// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _compute_linear_combination_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & coefficients) {
  return at::_ops::_compute_linear_combination_out::call(input, coefficients, out);
}
// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & _compute_linear_combination_outf(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
  return at::_ops::_compute_linear_combination_out::call(input, coefficients, out);
}

// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::max_dim::call(self, dim, keepdim);
}

// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values);
}
// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
  return at::_ops::max_dim_max::call(self, dim, keepdim, max, max_values);
}

// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::max_names_dim::call(self, dim, keepdim);
}

// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values);
}
// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
  return at::_ops::max_names_dim_max::call(self, dim, keepdim, max, max_values);
}

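// Example usage (illustrative sketch): the dim overload of max returns the
// maxima together with their indices.
//
//   at::Tensor x = at::randn({4, 6});
//   auto [values, indices] = at::max(x, /*dim=*/1);   // each of shape [4]
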
// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
inline at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
  return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
  }
}

// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
inline at::Tensor value_selecting_reduction_backward_symint(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
  return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
  }
}

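// Note on the at::symint wrappers above (a pattern repeated throughout this
// header): every SymInt-taking operator is exposed both as a concrete
// int64_t/IntArrayRef overload and as a `_symint` variant, and the templated
// helpers in at::symint select between the two by template argument.
// Illustrative sketch only, using at::narrow (declared further below):
//
//   at::Tensor t = at::randn({2, 5});
//   at::Tensor a = at::symint::narrow<int64_t>(t, 0, 1, 3);      // concrete
//   c10::SymInt s{1}, l{3};
//   at::Tensor b = at::symint::narrow<c10::SymInt>(t, 0, s, l);  // symbolic
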
// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
inline at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
  return at::_ops::amax::call(self, dim, keepdim);
}

// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & amax_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
  return at::_ops::amax_out::call(self, dim, keepdim, out);
}
// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & amax_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
  return at::_ops::amax_out::call(self, dim, keepdim, out);
}

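// Example usage (illustrative sketch): unlike max.dim, amax returns only
// the values (no indices) and may reduce several dims at once.
//
//   at::Tensor x = at::randn({4, 6});
//   at::Tensor m = at::amax(x, /*dim=*/{0, 1});   // 0-dim tensor
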
// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor mkldnn_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor mkldnn_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor mkldnn_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor mkldnn_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor quantized_max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor quantized_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
inline at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
  return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

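// Example usage (illustrative sketch): for the pooling family, an empty
// stride defaults to kernel_size; the 2-D variants expect NCHW input.
//
//   at::Tensor img = at::randn({1, 3, 8, 8});
//   at::Tensor p = at::max_pool2d(img, /*kernel_size=*/{2, 2});   // [1, 3, 4, 4]
//   auto [vals, idxs] =
//       at::max_pool1d_with_indices(at::randn({1, 3, 8}), /*kernel_size=*/{2});
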
// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
inline at::Tensor mean(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
  return at::_ops::mean::call(self, dtype);
}

// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
inline at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
  return at::_ops::mean_dim::call(self, dim, keepdim, dtype);
}

// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
  return at::_ops::mean_out::call(self, dim, keepdim, dtype, out);
}
// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  return at::_ops::mean_out::call(self, dim, keepdim, dtype, out);
}

// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
inline at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
  return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype);
}

// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
  return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out);
}
// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mean_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out);
}

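// Example usage (illustrative sketch): when dtype is given, the input is
// cast to it before the reduction; nanmean below is the NaN-ignoring
// analogue of mean.
//
//   at::Tensor x = at::randn({4, 6});
//   at::Tensor all = at::mean(x);                                  // 0-dim
//   at::Tensor cols = at::mean(x, /*dim=*/{0}, /*keepdim=*/true);  // [1, 6]
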
// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
inline at::Tensor nanmean(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
  return at::_ops::nanmean::call(self, dim, keepdim, dtype);
}

// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & nanmean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
  return at::_ops::nanmean_out::call(self, dim, keepdim, dtype, out);
}
// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & nanmean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
  return at::_ops::nanmean_out::call(self, dim, keepdim, dtype, out);
}

// aten::median(Tensor self) -> Tensor
inline at::Tensor median(const at::Tensor & self) {
  return at::_ops::median::call(self);
}

// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::median_dim::call(self, dim, keepdim);
}

// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::median_dim_values::call(self, dim, keepdim, values, indices);
}
// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  return at::_ops::median_dim_values::call(self, dim, keepdim, values, indices);
}

// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> median(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::median_names_dim::call(self, dim, keepdim);
}

// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::median_names_dim_values::call(self, dim, keepdim, values, indices);
}
// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> median_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  return at::_ops::median_names_dim_values::call(self, dim, keepdim, values, indices);
}

// aten::nanmedian(Tensor self) -> Tensor
inline at::Tensor nanmedian(const at::Tensor & self) {
  return at::_ops::nanmedian::call(self);
}

// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> nanmedian(const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::nanmedian_dim::call(self, dim, keepdim);
}

// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::nanmedian_dim_values::call(self, dim, keepdim, values, indices);
}
// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  return at::_ops::nanmedian_dim_values::call(self, dim, keepdim, values, indices);
}

// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> nanmedian(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::nanmedian_names_dim::call(self, dim, keepdim);
}

// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::nanmedian_names_dim_values::call(self, dim, keepdim, values, indices);
}
// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  return at::_ops::nanmedian_names_dim_values::call(self, dim, keepdim, values, indices);
}

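// Example usage (illustrative sketch): the scalar overload of median
// returns the lower of the two middle values for an even element count;
// the dim overloads also return indices, and nanmedian ignores NaNs.
//
//   at::Tensor x = at::randn({4, 6});
//   at::Tensor med = at::median(x);                  // 0-dim
//   auto [vals, idxs] = at::median(x, /*dim=*/1);    // each of shape [4]
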
// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::min_dim::call(self, dim, keepdim);
}

// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false) {
  return at::_ops::min_dim_min::call(self, dim, keepdim, min, min_indices);
}
// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
  return at::_ops::min_dim_min::call(self, dim, keepdim, min, min_indices);
}

// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::min_names_dim::call(self, dim, keepdim);
}

// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::min_names_dim_min::call(self, dim, keepdim, min, min_indices);
}
// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
  return at::_ops::min_names_dim_min::call(self, dim, keepdim, min, min_indices);
}

// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
inline at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
  return at::_ops::amin::call(self, dim, keepdim);
}

// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & amin_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
  return at::_ops::amin_out::call(self, dim, keepdim, out);
}
// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & amin_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
  return at::_ops::amin_out::call(self, dim, keepdim, out);
}

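// Example usage (illustrative sketch): min and amin mirror max and amax
// above (values plus indices vs. values only).
//
//   at::Tensor x = at::randn({4, 6});
//   auto [vals, idxs] = at::min(x, /*dim=*/1);
//   at::Tensor a = at::amin(x, /*dim=*/{1});
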
// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor
inline at::Tensor _mps_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
}

// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask) {
  return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
}

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
inline at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  return at::_ops::mkldnn_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
    return at::_ops::mkldnn_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups);
  }
}

// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor
inline at::Tensor mkldnn_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
  return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor mkldnn_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
    return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
  }
}

// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
  return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
}

// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
  return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
}

// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
  return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
}

// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
  return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
inline at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  return at::_ops::miopen_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
    return at::_ops::miopen_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic);
  }
}

// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
inline at::Tensor miopen_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
    return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
  }
}

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
inline at::Tensor miopen_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  return at::_ops::miopen_convolution_transpose::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor miopen_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
    return at::_ops::miopen_convolution_transpose::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic);
  }
}

// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
inline at::Tensor miopen_convolution_transpose_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor miopen_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
    return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
  }
}

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
inline at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
    return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic);
  }
}

// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
inline at::Tensor miopen_depthwise_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
  return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
    return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
  }
}

// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
inline at::Tensor miopen_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
}

// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor
inline at::Tensor miopen_convolution_add_relu(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
  return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
}

// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
  return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
}

// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
  return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
}

// aten::mm(Tensor self, Tensor mat2) -> Tensor
inline at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) {
  return at::_ops::mm::call(self, mat2);
}

// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
  return at::_ops::mm_out::call(self, mat2, out);
}
// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  return at::_ops::mm_out::call(self, mat2, out);
}

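// Example usage (illustrative sketch): mm is strictly 2-D x 2-D with no
// broadcasting; see at::matmul above for the broadcasting variant.
//
//   at::Tensor a = at::randn({3, 4});
//   at::Tensor b = at::randn({4, 5});
//   at::Tensor c = at::mm(a, b);   // shape [3, 5]
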
// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
inline at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense) {
  return at::_ops::_sparse_mm::call(sparse, dense);
}

// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor
inline at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
  return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
}

// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
inline at::Tensor _sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::_sparse_sparse_matmul::call(self, other);
}

// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, int64_t dim=-1, bool keepdim=false) {
  return at::_ops::mode::call(self, dim, keepdim);
}

// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim=-1, bool keepdim=false) {
  return at::_ops::mode_values::call(self, dim, keepdim, values, indices);
}
// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> mode_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  return at::_ops::mode_values::call(self, dim, keepdim, values, indices);
}

// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
inline ::std::tuple<at::Tensor,at::Tensor> mode(const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::mode_dimname::call(self, dim, keepdim);
}

// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> mode_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
  return at::_ops::mode_dimname_out::call(self, dim, keepdim, values, indices);
}
// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
inline ::std::tuple<at::Tensor &,at::Tensor &> mode_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
  return at::_ops::mode_dimname_out::call(self, dim, keepdim, values, indices);
}

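// Example usage (illustrative sketch): mode returns the most frequent
// value along a dimension together with an index where it occurs.
//
//   at::Tensor x = at::tensor({1, 2, 2, 3, 3, 3}).reshape({1, 6});
//   auto [vals, idxs] = at::mode(x, /*dim=*/1);   // vals holds [3]
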
// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
inline at::Tensor mul(const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::mul_Tensor::call(self, other);
}

// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::mul_out::call(self, other, out);
}
// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return at::_ops::mul_out::call(self, other, out);
}

// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
inline at::Tensor mul(const at::Tensor & self, const at::Scalar & other) {
  return at::_ops::mul_Scalar::call(self, other);
}

// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
inline at::Tensor multiply(const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::multiply_Tensor::call(self, other);
}

// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & multiply_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::multiply_out::call(self, other, out);
}
// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & multiply_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
  return at::_ops::multiply_out::call(self, other, out);
}

// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
inline at::Tensor multiply(const at::Tensor & self, const at::Scalar & other) {
  return at::_ops::multiply_Scalar::call(self, other);
}

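// Example usage (illustrative sketch): multiply is an alias of mul; both
// broadcast and also accept a Scalar right-hand side.
//
//   at::Tensor x = at::randn({2, 3});
//   at::Tensor y = at::mul(x, 2.0);                   // Scalar overload
//   at::Tensor z = at::multiply(x, at::randn({3}));   // broadcasts to [2, 3]
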
// aten::mv(Tensor self, Tensor vec) -> Tensor
inline at::Tensor mv(const at::Tensor & self, const at::Tensor & vec) {
  return at::_ops::mv::call(self, vec);
}

// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec) {
  return at::_ops::mv_out::call(self, vec, out);
}
// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mv_outf(const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
  return at::_ops::mv_out::call(self, vec, out);
}

// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mvlgamma_out(at::Tensor & out, const at::Tensor & self, int64_t p) {
  return at::_ops::mvlgamma_out::call(self, p, out);
}
// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & mvlgamma_outf(const at::Tensor & self, int64_t p, at::Tensor & out) {
  return at::_ops::mvlgamma_out::call(self, p, out);
}

// aten::mvlgamma(Tensor self, int p) -> Tensor
inline at::Tensor mvlgamma(const at::Tensor & self, int64_t p) {
  return at::_ops::mvlgamma::call(self, p);
}

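// Example usage (illustrative sketch): the multivariate log-gamma of order
// p is only defined for inputs greater than (p - 1) / 2, so shift the
// sample accordingly.
//
//   at::Tensor x = at::rand({4}) + 2.0;   // keep every element > 0.5
//   at::Tensor y = at::mvlgamma(x, /*p=*/2);
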
// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
inline at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
  return at::_ops::narrow_copy::call(self, dim, start, length);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
    return at::_ops::narrow_copy::call(self, dim, start, length);
  }
}

// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
inline at::Tensor narrow_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  return at::_ops::narrow_copy::call(self, dim, start, length);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor narrow_copy(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    return at::_ops::narrow_copy::call(self, dim, start, length);
  }
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
  return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
  }
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) {
  return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) {
    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
  }
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & narrow_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
  return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & narrow_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
  }
}

// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & narrow_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
  return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & narrow_copy_outf(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
    return at::_ops::narrow_copy_out::call(self, dim, start, length, out);
  }
}

5007// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
5008inline at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
5009 return at::_ops::narrow::call(self, dim, start, length);
5010}
5011namespace symint {
5012 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5013 at::Tensor narrow(const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
5014 return at::_ops::narrow::call(self, dim, start, length);
5015 }
5016}
5017
5018// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
5019inline at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
5020 return at::_ops::narrow::call(self, dim, start, length);
5021}
5022namespace symint {
5023 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5024 at::Tensor narrow(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
5025 return at::_ops::narrow::call(self, dim, start, length);
5026 }
5027}
5028
5029// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
5030inline at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) {
5031 return at::_ops::narrow_Tensor::call(self, dim, start, length);
5032}
5033namespace symint {
5034 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5035 at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) {
5036 return at::_ops::narrow_Tensor::call(self, dim, start, length);
5037 }
5038}
5039
5040// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
5041inline at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
5042 return at::_ops::narrow_Tensor::call(self, dim, start, length);
5043}
5044namespace symint {
5045 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5046 at::Tensor narrow(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
5047 return at::_ops::narrow_Tensor::call(self, dim, start, length);
5048 }
5049}
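
// Illustrative usage (editorial sketch, not generated code; assumes a 2-D
// float tensor):
//
//   at::Tensor t = at::rand({4, 6});
//   at::Tensor v = at::narrow(t, /*dim=*/1, /*start=*/2, /*length=*/3);
//   // v is a 4x3 view aliasing t, per the Tensor(a) -> Tensor(a) schema;
//   // at::narrow_copy returns an independent tensor with the same values.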
5050
5051// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
5052inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
5053 return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
5054}
5055
5056// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
5057inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
5058 return at::_ops::native_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
5059}
5060// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
5061inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
5062 return at::_ops::native_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
5063}
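
// Illustrative usage (editorial sketch; assumes NCHW float input with
// per-channel affine parameters and running statistics):
//
//   at::Tensor x = at::rand({8, 3, 16, 16});
//   at::Tensor w = at::ones({3}), b = at::zeros({3});
//   at::Tensor rm = at::zeros({3}), rv = at::ones({3});
//   auto [y, save_mean, save_invstd] = at::native_batch_norm(
//       x, w, b, rm, rv, /*training=*/true, /*momentum=*/0.1, /*eps=*/1e-5);
//   // save_mean/save_invstd are the batch statistics later consumed by
//   // at::native_batch_norm_backward.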
5064
5065// aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
5066inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
5067 return at::_ops::_native_batch_norm_legit::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
5068}
5069
5070// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
5071inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
5072 return at::_ops::_native_batch_norm_legit_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
5073}
5074// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
5075inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
5076 return at::_ops::_native_batch_norm_legit_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
5077}
5078
5079// aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
5080inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
5081 return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps);
5082}
5083
5084// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
5085inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
5086 return at::_ops::_native_batch_norm_legit_no_stats_out::call(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
5087}
5088// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
5089inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
5090 return at::_ops::_native_batch_norm_legit_no_stats_out::call(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
5091}
5092
5093// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
5094inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats(const at::Tensor & input, double eps) {
5095 return at::_ops::batch_norm_stats::call(input, eps);
5096}
5097
5098// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
5099inline at::Tensor batch_norm_elemt(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
5100 return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps);
5101}
5102
5103// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
5104inline at::Tensor & batch_norm_elemt_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
5105 return at::_ops::batch_norm_elemt_out::call(input, weight, bias, mean, invstd, eps, out);
5106}
5107// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
5108inline at::Tensor & batch_norm_elemt_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {
5109 return at::_ops::batch_norm_elemt_out::call(input, weight, bias, mean, invstd, eps, out);
5110}
5111
5112// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
5113inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
5114 return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
5115}
5116
5117// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
5118inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
5119 return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
5120}
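
// NOTE (editorial): batch_norm_stats, batch_norm_gather_stats*, and
// batch_norm_elemt are the building blocks of synchronized batch norm:
// each rank computes local statistics, the gathered statistics are
// combined, and the element-wise kernel applies them. A single-process
// sketch, on a backend that implements these ops (they are primarily
// CUDA kernels):
//
//   auto [mean, invstd] = at::batch_norm_stats(x, /*eps=*/1e-5);
//   at::Tensor y = at::batch_norm_elemt(x, /*weight=*/c10::nullopt,
//       /*bias=*/c10::nullopt, mean, invstd, /*eps=*/1e-5);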
5121
5122// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
5123inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
5124 return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
5125}
5126
5127// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
5128inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
5129 return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
5130}
5131
5132// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
5133inline at::Tensor batch_norm_backward_elemt(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
5134 return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count);
5135}
5136
5137// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
5138inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
5139 return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum);
5140}
5141
5142// aten::is_vulkan_available() -> bool
5143inline bool is_vulkan_available() {
5144 return at::_ops::is_vulkan_available::call();
5145}
5146
5147// aten::_nnpack_available() -> bool
5148inline bool _nnpack_available() {
5149 return at::_ops::_nnpack_available::call();
5150}
5151
5152// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
5153inline at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
5154 return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride);
5155}
5156namespace symint {
5157 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5158 at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
5159 return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride);
5160 }
5161}
5162
5163// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor
5164inline at::Tensor _nnpack_spatial_convolution_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride=1) {
5165 return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
5166}
5167namespace symint {
5168 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5169 at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride=1) {
5170 return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
5171 }
5172}
5173
5174// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5175inline at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5176 return at::_ops::ones_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5177}
5178// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5179inline at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5180 return at::_ops::ones_names::call(size, names, dtype, layout, device, pin_memory);
5181}
5182
5183// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5184inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options={}) {
5185 return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5186}
5187namespace symint {
5188 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5189 at::Tensor ones(at::IntArrayRef size, at::TensorOptions options={}) {
5190 return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5191 }
5192}
5193
5194// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5195inline at::Tensor ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5196 return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5197}
5198namespace symint {
5199 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5200 at::Tensor ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5201 return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5202 }
5203}
5204
5205// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5206inline at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
5207 return at::_ops::ones::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5208}
5209namespace symint {
5210 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5211 at::Tensor ones(c10::SymIntArrayRef size, at::TensorOptions options={}) {
5212 return at::_ops::ones::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5213 }
5214}
5215
5216// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5217inline at::Tensor ones_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5218 return at::_ops::ones::call(size, dtype, layout, device, pin_memory);
5219}
5220namespace symint {
5221 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5222 at::Tensor ones(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5223 return at::_ops::ones::call(size, dtype, layout, device, pin_memory);
5224 }
5225}
5226
5227// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5228inline at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size) {
5229 return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out);
5230}
5231namespace symint {
5232 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5233 at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size) {
5234 return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out);
5235 }
5236}
5237
5238// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5239inline at::Tensor & ones_outf(at::IntArrayRef size, at::Tensor & out) {
5240 return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out);
5241}
5242namespace symint {
5243 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5244 at::Tensor & ones_outf(at::IntArrayRef size, at::Tensor & out) {
5245 return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out);
5246 }
5247}
5248
5249// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5250inline at::Tensor & ones_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
5251 return at::_ops::ones_out::call(size, out);
5252}
5253namespace symint {
5254 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5255 at::Tensor & ones_out(at::Tensor & out, c10::SymIntArrayRef size) {
5256 return at::_ops::ones_out::call(size, out);
5257 }
5258}
5259
5260// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5261inline at::Tensor & ones_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
5262 return at::_ops::ones_out::call(size, out);
5263}
5264namespace symint {
5265 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5266 at::Tensor & ones_outf(c10::SymIntArrayRef size, at::Tensor & out) {
5267 return at::_ops::ones_out::call(size, out);
5268 }
5269}
5270
5271// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
5272inline at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
5273 return at::_ops::ones_like::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
5274}
5275// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
5276inline at::Tensor ones_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5277 return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format);
5278}
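
// Illustrative usage (editorial sketch): the TensorOptions overloads and
// the unpacked dtype/layout/device/pin_memory overloads are alternative
// entry points to the same op:
//
//   at::Tensor a = at::ones({2, 3}, at::dtype(at::kFloat).device(at::kCPU));
//   at::Tensor b = at::ones({2, 3}, at::kFloat, c10::nullopt, c10::nullopt,
//                           c10::nullopt);
//   at::Tensor c = at::ones_like(a);  // inherits a's dtype/device/layout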
5279
5280// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
5281inline at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false) {
5282 return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim);
5283}
5284
5285// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
5286inline at::Tensor cdist(const at::Tensor & x1, const at::Tensor & x2, double p=2, c10::optional<int64_t> compute_mode=c10::nullopt) {
5287 return at::_ops::cdist::call(x1, x2, p, compute_mode);
5288}
5289
5290// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
5291inline at::Tensor _euclidean_dist(const at::Tensor & x1, const at::Tensor & x2) {
5292 return at::_ops::_euclidean_dist::call(x1, x2);
5293}
5294
5295// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
5296inline at::Tensor _cdist_forward(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
5297 return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode);
5298}
5299
5300// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
5301inline at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
5302 return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
5303}
5304
5305// aten::pdist(Tensor self, float p=2) -> Tensor
5306inline at::Tensor pdist(const at::Tensor & self, double p=2) {
5307 return at::_ops::pdist::call(self, p);
5308}
5309
5310// aten::_pdist_forward(Tensor self, float p=2) -> Tensor
5311inline at::Tensor _pdist_forward(const at::Tensor & self, double p=2) {
5312 return at::_ops::_pdist_forward::call(self, p);
5313}
5314
5315// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
5316inline at::Tensor _pdist_backward(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
5317 return at::_ops::_pdist_backward::call(grad, self, p, pdist);
5318}
5319
5320// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
5321inline at::Tensor cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim=1, double eps=1e-08) {
5322 return at::_ops::cosine_similarity::call(x1, x2, dim, eps);
5323}
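
// NOTE (editorial): cdist computes pairwise distances between two point
// sets; pdist returns the condensed upper triangle of one set's
// self-distances. A sketch (assumes float inputs of shape [N, D] / [M, D]):
//
//   at::Tensor x1 = at::rand({5, 8}), x2 = at::rand({7, 8});
//   at::Tensor d = at::cdist(x1, x2);    // shape [5, 7], p defaults to 2
//   at::Tensor s = at::pdist(x1);        // shape [5*4/2] = [10]
//   at::Tensor c = at::cosine_similarity(x1, x1, /*dim=*/1);  // shape [5]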
5324
5325// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
5326inline at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims) {
5327 return at::_ops::permute::call(self, dims);
5328}
5329
5330// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
5331inline at::Tensor movedim(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
5332 return at::_ops::movedim_intlist::call(self, source, destination);
5333}
5334
5335// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
5336inline at::Tensor movedim(const at::Tensor & self, int64_t source, int64_t destination) {
5337 return at::_ops::movedim_int::call(self, source, destination);
5338}
5339
5340// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
5341inline at::Tensor moveaxis(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
5342 return at::_ops::moveaxis_intlist::call(self, source, destination);
5343}
5344
5345// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
5346inline at::Tensor moveaxis(const at::Tensor & self, int64_t source, int64_t destination) {
5347 return at::_ops::moveaxis_int::call(self, source, destination);
5348}
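
// Illustrative usage (editorial sketch): movedim and moveaxis are aliases,
// and, like permute, return views:
//
//   at::Tensor t = at::rand({2, 3, 4});
//   at::Tensor p = at::permute(t, {2, 0, 1});                        // [4, 2, 3]
//   at::Tensor m = at::movedim(t, /*source=*/0, /*destination=*/2);  // [3, 4, 2]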
5349
5350// aten::adjoint(Tensor(a) self) -> Tensor(a)
5351inline at::Tensor adjoint(const at::Tensor & self) {
5352 return at::_ops::adjoint::call(self);
5353}
5354
5355// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
5356inline at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor) {
5357 return at::_ops::pixel_shuffle::call(self, upscale_factor);
5358}
5359
5360// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
5361inline at::Tensor pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor) {
5362 return at::_ops::pixel_unshuffle::call(self, downscale_factor);
5363}
5364
5365// aten::channel_shuffle(Tensor self, int groups) -> Tensor
5366inline at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups) {
5367 return at::_ops::channel_shuffle::call(self, groups);
5368}
5369
5370// aten::native_channel_shuffle(Tensor self, int groups) -> Tensor
5371inline at::Tensor native_channel_shuffle(const at::Tensor & self, int64_t groups) {
5372 return at::_ops::native_channel_shuffle::call(self, groups);
5373}
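
// NOTE (editorial): pixel_shuffle rearranges (*, C*r^2, H, W) into
// (*, C, H*r, W*r) and pixel_unshuffle is its inverse. A sketch with r = 2:
//
//   at::Tensor x = at::rand({1, 8, 4, 4});
//   at::Tensor y = at::pixel_shuffle(x, /*upscale_factor=*/2);     // [1, 2, 8, 8]
//   at::Tensor z = at::pixel_unshuffle(y, /*downscale_factor=*/2); // [1, 8, 4, 4]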
5374
5375// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
5376inline at::Tensor _pin_memory(const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt) {
5377 return at::_ops::_pin_memory::call(self, device);
5378}
5379
5380// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
5381inline at::Tensor pinverse(const at::Tensor & self, double rcond=1e-15) {
5382 return at::_ops::pinverse::call(self, rcond);
5383}
5384
5385// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
5386inline at::Tensor poisson_nll_loss(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
5387 return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction);
5388}
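
// NOTE (editorial): with log_input=true the per-element loss is
// exp(input) - target * input; with log_input=false it is
// input - target * log(input + eps). `reduction` takes the values from
// at::Reduction. A sketch:
//
//   at::Tensor input = at::rand({4}), target = at::rand({4});
//   at::Tensor loss = at::poisson_nll_loss(input, target,
//       /*log_input=*/true, /*full=*/false, /*eps=*/1e-8,
//       at::Reduction::Mean);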
5389
5390// aten::rad2deg(Tensor self) -> Tensor
5391inline at::Tensor rad2deg(const at::Tensor & self) {
5392 return at::_ops::rad2deg::call(self);
5393}
5394
5395// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
5396inline at::Tensor & rad2deg_(at::Tensor & self) {
5397 return at::_ops::rad2deg_::call(self);
5398}
5399
5400// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
5401inline at::Tensor & rad2deg_out(at::Tensor & out, const at::Tensor & self) {
5402 return at::_ops::rad2deg_out::call(self, out);
5403}
5404// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
5405inline at::Tensor & rad2deg_outf(const at::Tensor & self, at::Tensor & out) {
5406 return at::_ops::rad2deg_out::call(self, out);
5407}
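
// NOTE (editorial): the `_out` wrappers take the destination tensor first,
// while the `_outf` wrappers keep the schema's argument order with `out`
// last; both forward to the same op. A sketch:
//
//   at::Tensor x = at::rand({3});
//   at::Tensor out = at::empty_like(x);
//   at::rad2deg_out(out, x);   // out-first
//   at::rad2deg_outf(x, out);  // schema-order, same underlying call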
5408
5409// aten::deg2rad(Tensor self) -> Tensor
5410inline at::Tensor deg2rad(const at::Tensor & self) {
5411 return at::_ops::deg2rad::call(self);
5412}
5413
5414// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
5415inline at::Tensor & deg2rad_(at::Tensor & self) {
5416 return at::_ops::deg2rad_::call(self);
5417}
5418
5419// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
5420inline at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
5421 return at::_ops::deg2rad_out::call(self, out);
5422}
5423// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
5424inline at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
5425 return at::_ops::deg2rad_out::call(self, out);
5426}
5427
5428// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5429inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options={}) {
5430 return at::_ops::scalar_tensor::call(s, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5431}
5432// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5433inline at::Tensor scalar_tensor(const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5434 return at::_ops::scalar_tensor::call(s, dtype, layout, device, pin_memory);
5435}
5436
5437// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5438inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5439 return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5440}
5441namespace symint {
5442 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5443 at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5444 return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5445 }
5446}
5447
5448// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5449inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5450 return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
5451}
5452namespace symint {
5453 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5454 at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5455 return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
5456 }
5457}
5458
5459// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5460inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5461 return at::_ops::rand_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5462}
5463namespace symint {
5464 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5465 at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5466 return at::_ops::rand_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5467 }
5468}
5469
5470// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5471inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5472 return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory);
5473}
5474namespace symint {
5475 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5476 at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5477 return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory);
5478 }
5479}
5480
5481// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5482inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5483 return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5484}
5485namespace symint {
5486 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5487 at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5488 return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5489 }
5490}
5491
5492// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5493inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5494 return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
5495}
5496namespace symint {
5497 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5498 at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5499 return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
5500 }
5501}
5502
5503// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5504inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5505 return at::_ops::rand_generator_with_names::call(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5506}
5507namespace symint {
5508 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5509 at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
5510 return at::_ops::rand_generator_with_names::call(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5511 }
5512}
5513
5514// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5515inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5516 return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
5517}
5518namespace symint {
5519 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5520 at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5521 return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
5522 }
5523}
5524
5525// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5526inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) {
5527 return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5528}
5529namespace symint {
5530 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5531 at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) {
5532 return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5533 }
5534}
5535
5536// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5537inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5538 return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5539}
5540namespace symint {
5541 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5542 at::Tensor rand(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5543 return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5544 }
5545}
5546
5547// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5548inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
5549 return at::_ops::rand::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5550}
5551namespace symint {
5552 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5553 at::Tensor rand(c10::SymIntArrayRef size, at::TensorOptions options={}) {
5554 return at::_ops::rand::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5555 }
5556}
5557
5558// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5559inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5560 return at::_ops::rand::call(size, dtype, layout, device, pin_memory);
5561}
5562namespace symint {
5563 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5564 at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5565 return at::_ops::rand::call(size, dtype, layout, device, pin_memory);
5566 }
5567}
5568
5569// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5570inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
5571 return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5572}
5573namespace symint {
5574 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5575 at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
5576 return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5577 }
5578}
5579
5580// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5581inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5582 return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
5583}
5584namespace symint {
5585 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5586 at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5587 return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
5588 }
5589}
5590
5591// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5592inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
5593 return at::_ops::rand_generator::call(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5594}
5595namespace symint {
5596 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5597 at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
5598 return at::_ops::rand_generator::call(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5599 }
5600}
5601
5602// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5603inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5604 return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory);
5605}
5606namespace symint {
5607 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5608 at::Tensor rand(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5609 return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory);
5610 }
5611}
5612
5613// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5614inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) {
5615 return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
5616}
5617namespace symint {
5618 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5619 at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) {
5620 return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
5621 }
5622}
5623
5624// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5625inline at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) {
5626 return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
5627}
5628namespace symint {
5629 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5630 at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) {
5631 return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out);
5632 }
5633}
5634
5635// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5636inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
5637 return at::_ops::rand_out::call(size, out);
5638}
5639namespace symint {
5640 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5641 at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size) {
5642 return at::_ops::rand_out::call(size, out);
5643 }
5644}
5645
5646// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5647inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
5648 return at::_ops::rand_out::call(size, out);
5649}
5650namespace symint {
5651 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5652 at::Tensor & rand_outf(c10::SymIntArrayRef size, at::Tensor & out) {
5653 return at::_ops::rand_out::call(size, out);
5654 }
5655}
5656
5657// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5658inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
5659 return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
5660}
5661namespace symint {
5662 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5663 at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
5664 return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
5665 }
5666}
5667
5668// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5669inline at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5670 return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
5671}
5672namespace symint {
5673 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5674 at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5675 return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
5676 }
5677}
5678
5679// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5680inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
5681 return at::_ops::rand_generator_out::call(size, generator, out);
5682}
5683namespace symint {
5684 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5685 at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
5686 return at::_ops::rand_generator_out::call(size, generator, out);
5687 }
5688}
5689
5690// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5691inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5692 return at::_ops::rand_generator_out::call(size, generator, out);
5693}
5694namespace symint {
5695 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5696 at::Tensor & rand_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5697 return at::_ops::rand_generator_out::call(size, generator, out);
5698 }
5699}
5700
5701// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
5702inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
5703 return at::_ops::rand_like::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
5704}
5705// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
5706inline at::Tensor rand_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5707 return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format);
5708}
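
// Illustrative usage (editorial sketch): rand draws from the uniform
// distribution on [0, 1); rand_like matches the source tensor's options:
//
//   at::Tensor r = at::rand({2, 2});               // default dtype
//   at::Tensor d = at::rand({2, 2}, at::kDouble);  // explicit dtype
//   at::Tensor q = at::rand_like(r);               // same shape/options as r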
5709
5710// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5711inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
5712 return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5713}
5714namespace symint {
5715 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5716 at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
5717 return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5718 }
5719}
5720
5721// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5722inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5723 return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5724}
5725namespace symint {
5726 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5727 at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5728 return at::_ops::randint::call(high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5729 }
5730}
5731
5732// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5733inline at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
5734 return at::_ops::randint::call(high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5735}
5736namespace symint {
5737 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5738 at::Tensor randint(int64_t high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
5739 return at::_ops::randint::call(high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5740 }
5741}
5742
5743// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5744inline at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5745 return at::_ops::randint::call(high, size, dtype, layout, device, pin_memory);
5746}
5747namespace symint {
5748 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5749 at::Tensor randint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5750 return at::_ops::randint::call(high, size, dtype, layout, device, pin_memory);
5751 }
5752}
5753
5754// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5755inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5756 return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5757}
5758namespace symint {
5759 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5760 at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5761 return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5762 }
5763}
5764
5765// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5766inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5767 return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
5768}
5769namespace symint {
5770 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5771 at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5772 return at::_ops::randint_generator::call(high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
5773 }
5774}
5775
5776// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5777inline at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5778 return at::_ops::randint_generator::call(high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5779}
5780namespace symint {
5781 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5782 at::Tensor randint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5783 return at::_ops::randint_generator::call(high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5784 }
5785}
5786
5787// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5788inline at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5789 return at::_ops::randint_generator::call(high, size, generator, dtype, layout, device, pin_memory);
5790}
5791namespace symint {
5792 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5793 at::Tensor randint(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5794 return at::_ops::randint_generator::call(high, size, generator, dtype, layout, device, pin_memory);
5795 }
5796}
5797
5798// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5799inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
5800 return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5801}
5802namespace symint {
5803 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5804 at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
5805 return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5806 }
5807}
5808
5809// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5810inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5811 return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5812}
5813namespace symint {
5814 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5815 at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5816 return at::_ops::randint_low::call(low, high, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
5817 }
5818}
5819
5820// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5821inline at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
5822 return at::_ops::randint_low::call(low, high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5823}
5824namespace symint {
5825 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5826 at::Tensor randint(int64_t low, int64_t high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong) {
5827 return at::_ops::randint_low::call(low, high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5828 }
5829}
5830
5831// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5832inline at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5833 return at::_ops::randint_low::call(low, high, size, dtype, layout, device, pin_memory);
5834}
5835namespace symint {
5836 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5837 at::Tensor randint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5838 return at::_ops::randint_low::call(low, high, size, dtype, layout, device, pin_memory);
5839 }
5840}
5841
5842// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5843inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5844 return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5845}
5846namespace symint {
5847 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5848 at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5849 return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5850 }
5851}
5852
5853// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5854inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5855 return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
5856}
5857namespace symint {
5858 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5859 at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5860 return at::_ops::randint_low_generator::call(low, high, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
5861 }
5862}
5863
5864// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5865inline at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5866 return at::_ops::randint_low_generator::call(low, high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5867}
5868namespace symint {
5869 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5870 at::Tensor randint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
5871 return at::_ops::randint_low_generator::call(low, high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
5872 }
5873}
5874
5875// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
5876inline at::Tensor randint_symint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5877 return at::_ops::randint_low_generator::call(low, high, size, generator, dtype, layout, device, pin_memory);
5878}
5879namespace symint {
5880 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5881 at::Tensor randint(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5882 return at::_ops::randint_low_generator::call(low, high, size, generator, dtype, layout, device, pin_memory);
5883 }
5884}
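
// Illustrative usage (not part of the generated header): the TensorOptions
// overload and the unpacked dtype/layout/device/pin_memory overload above
// dispatch to the same at::_ops entry, so these two calls are equivalent:
//
//   at::Tensor a = at::randint(/*low=*/0, /*high=*/10, {2, 3},
//                              at::TensorOptions().dtype(at::kLong));
//   at::Tensor b = at::randint(0, 10, {2, 3},
//                              at::kLong, c10::nullopt, c10::nullopt, c10::nullopt);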
5885
5886// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5887inline at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size) {
5888 return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
5889}
5890namespace symint {
5891 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5892 at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size) {
5893 return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
5894 }
5895}
5896
5897// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5898inline at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor & out) {
5899 return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
5900}
5901namespace symint {
5902 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5903 at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor & out) {
5904 return at::_ops::randint_out::call(high, c10::fromIntArrayRefSlow(size), out);
5905 }
5906}
5907
5908// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5909inline at::Tensor & randint_symint_out(at::Tensor & out, int64_t high, c10::SymIntArrayRef size) {
5910 return at::_ops::randint_out::call(high, size, out);
5911}
5912namespace symint {
5913 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5914 at::Tensor & randint_out(at::Tensor & out, int64_t high, c10::SymIntArrayRef size) {
5915 return at::_ops::randint_out::call(high, size, out);
5916 }
5917}
5918
5919// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5920inline at::Tensor & randint_symint_outf(int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
5921 return at::_ops::randint_out::call(high, size, out);
5922}
5923namespace symint {
5924 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5925 at::Tensor & randint_outf(int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
5926 return at::_ops::randint_out::call(high, size, out);
5927 }
5928}
5929
5930// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5931inline at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
5932 return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
5933}
5934namespace symint {
5935 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5936 at::Tensor & randint_out(at::Tensor & out, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
5937 return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
5938 }
5939}
5940
5941// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5942inline at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5943 return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
5944}
5945namespace symint {
5946 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5947 at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5948 return at::_ops::randint_generator_out::call(high, c10::fromIntArrayRefSlow(size), generator, out);
5949 }
5950}
5951
5952// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5953inline at::Tensor & randint_symint_out(at::Tensor & out, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
5954 return at::_ops::randint_generator_out::call(high, size, generator, out);
5955}
5956namespace symint {
5957 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5958 at::Tensor & randint_out(at::Tensor & out, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
5959 return at::_ops::randint_generator_out::call(high, size, generator, out);
5960 }
5961}
5962
5963// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
5964inline at::Tensor & randint_symint_outf(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5965 return at::_ops::randint_generator_out::call(high, size, generator, out);
5966}
5967namespace symint {
5968 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
5969 at::Tensor & randint_outf(int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
5970 return at::_ops::randint_generator_out::call(high, size, generator, out);
5971 }
5972}
5973
5974// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5975inline at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size) {
5976 return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
5977}
5978namespace symint {
5979 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5980 at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size) {
5981 return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
5982 }
5983}
5984
5985// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5986inline at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out) {
5987 return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
5988}
5989namespace symint {
5990 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
5991 at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out) {
5992 return at::_ops::randint_low_out::call(low, high, c10::fromIntArrayRefSlow(size), out);
5993 }
5994}
5995
5996// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
5997inline at::Tensor & randint_symint_out(at::Tensor & out, int64_t low, int64_t high, c10::SymIntArrayRef size) {
5998 return at::_ops::randint_low_out::call(low, high, size, out);
5999}
6000namespace symint {
6001 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6002 at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, c10::SymIntArrayRef size) {
6003 return at::_ops::randint_low_out::call(low, high, size, out);
6004 }
6005}
6006
6007// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
6008inline at::Tensor & randint_symint_outf(int64_t low, int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
6009 return at::_ops::randint_low_out::call(low, high, size, out);
6010}
6011namespace symint {
6012 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6013 at::Tensor & randint_outf(int64_t low, int64_t high, c10::SymIntArrayRef size, at::Tensor & out) {
6014 return at::_ops::randint_low_out::call(low, high, size, out);
6015 }
6016}
6017
6018// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6019inline at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
6020 return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
6021}
6022namespace symint {
6023 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6024 at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
6025 return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
6026 }
6027}
6028
6029// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6030inline at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6031 return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
6032}
6033namespace symint {
6034 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6035 at::Tensor & randint_outf(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6036 return at::_ops::randint_low_generator_out::call(low, high, c10::fromIntArrayRefSlow(size), generator, out);
6037 }
6038}
6039
6040// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6041inline at::Tensor & randint_symint_out(at::Tensor & out, int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
6042 return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
6043}
6044namespace symint {
6045 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6046 at::Tensor & randint_out(at::Tensor & out, int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
6047 return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
6048 }
6049}
6050
6051// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6052inline at::Tensor & randint_symint_outf(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6053 return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
6054}
6055namespace symint {
6056 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6057 at::Tensor & randint_outf(int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6058 return at::_ops::randint_low_generator_out::call(low, high, size, generator, out);
6059 }
6060}
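
// Illustrative usage (not part of the generated header): each out-variant has
// two spellings that differ only in argument order -- `randint_out` takes the
// destination first, `randint_outf` takes it last -- and both forward to the
// same underlying op:
//
//   at::Tensor out = at::empty({4}, at::kLong);
//   at::randint_out(out, /*low=*/0, /*high=*/100, {4});  // out-first form
//   at::randint_outf(0, 100, {4}, out);                  // out-last form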
6061
6062// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
6063inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
6064 return at::_ops::randint_like::call(self, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
6065}
6066// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
6067inline at::Tensor randint_like(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
6068 return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
6069}
6070
6071// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
6072inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
6073 return at::_ops::randint_like_low_dtype::call(self, low, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
6074}
6075// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
6076inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
6077 return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
6078}
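
// Illustrative usage (not part of the generated header): randint_like keeps
// self's dtype/layout/device unless overridden via options:
//
//   at::Tensor src = at::zeros({2, 2});                    // default float, CPU
//   at::Tensor r = at::randint_like(src, /*high=*/5);      // float values in [0, 5)
//   at::Tensor s = at::randint_like(src, /*low=*/1, /*high=*/5, at::kLong);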
6079
6080// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6081inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) {
6082 return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6083}
6084namespace symint {
6085 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6086 at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) {
6087 return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6088 }
6089}
6090
6091// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6092inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6093 return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
6094}
6095namespace symint {
6096 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6097 at::Tensor randn(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6098 return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
6099 }
6100}
6101
6102// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6103inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
6104 return at::_ops::randn::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6105}
6106namespace symint {
6107 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6108 at::Tensor randn(c10::SymIntArrayRef size, at::TensorOptions options={}) {
6109 return at::_ops::randn::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6110 }
6111}
6112
6113// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6114inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6115 return at::_ops::randn::call(size, dtype, layout, device, pin_memory);
6116}
6117namespace symint {
6118 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6119 at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6120 return at::_ops::randn::call(size, dtype, layout, device, pin_memory);
6121 }
6122}
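
// Illustrative usage (not part of the generated header): with concrete sizes
// the at::IntArrayRef overload is the usual entry point; the *_symint
// variants take c10::SymIntArrayRef so sizes can stay symbolic when tracing:
//
//   at::Tensor x = at::randn({3, 4});                                   // float, CPU
//   at::Tensor y = at::randn({3, 4}, at::TensorOptions().dtype(at::kDouble));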
6123
6124// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6125inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
6126 return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6127}
6128namespace symint {
6129 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6130 at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
6131 return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6132 }
6133}
6134
6135// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6136inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6137 return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
6138}
6139namespace symint {
6140 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6141 at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6142 return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
6143 }
6144}
6145
6146// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6147inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
6148 return at::_ops::randn_generator::call(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6149}
6150namespace symint {
6151 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6152 at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
6153 return at::_ops::randn_generator::call(size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6154 }
6155}
6156
6157// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6158inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6159 return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory);
6160}
6161namespace symint {
6162 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6163 at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6164 return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory);
6165 }
6166}
6167
6168// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6169inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6170 return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6171}
6172namespace symint {
6173 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6174 at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6175 return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6176 }
6177}
6178
6179// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6180inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6181 return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
6182}
6183namespace symint {
6184 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6185 at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6186 return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory);
6187 }
6188}
6189
6190// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6191inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6192 return at::_ops::randn_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6193}
6194namespace symint {
6195 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6196 at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6197 return at::_ops::randn_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6198 }
6199}
6200
6201// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6202inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6203 return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory);
6204}
6205namespace symint {
6206 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6207 at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6208 return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory);
6209 }
6210}
6211
6212// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6213inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6214 return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6215}
6216namespace symint {
6217 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6218 at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6219 return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6220 }
6221}
6222
6223// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6224inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6225 return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
6226}
6227namespace symint {
6228 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6229 at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6230 return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory);
6231 }
6232}
6233
6234// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6235inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6236 return at::_ops::randn_generator_with_names::call(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6237}
6238namespace symint {
6239 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6240 at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
6241 return at::_ops::randn_generator_with_names::call(size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6242 }
6243}
6244
6245// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6246inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6247 return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
6248}
6249namespace symint {
6250 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6251 at::Tensor randn(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6252 return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory);
6253 }
6254}
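
// Illustrative usage (not part of the generated header): the names overloads
// attach dimension names at construction time (assumes the at::Dimname /
// at::Symbol helpers shown here):
//
//   std::vector<at::Dimname> names = {
//       at::Dimname::fromSymbol(at::Symbol::dimname("N")),
//       at::Dimname::fromSymbol(at::Symbol::dimname("C"))};
//   at::Tensor n = at::randn({2, 3}, at::DimnameList(names));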
6255
6256// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
6257inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
6258 return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
6259}
6260namespace symint {
6261 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6262 at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) {
6263 return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
6264 }
6265}
6266
6267// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
6268inline at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
6269 return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
6270}
6271namespace symint {
6272 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6273 at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) {
6274 return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out);
6275 }
6276}
6277
6278// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
6279inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
6280 return at::_ops::randn_out::call(size, out);
6281}
6282namespace symint {
6283 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6284 at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size) {
6285 return at::_ops::randn_out::call(size, out);
6286 }
6287}
6288
6289// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
6290inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
6291 return at::_ops::randn_out::call(size, out);
6292}
6293namespace symint {
6294 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6295 at::Tensor & randn_outf(c10::SymIntArrayRef size, at::Tensor & out) {
6296 return at::_ops::randn_out::call(size, out);
6297 }
6298}
6299
6300// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6301inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
6302 return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
6303}
6304namespace symint {
6305 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6306 at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
6307 return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
6308 }
6309}
6310
6311// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6312inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6313 return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
6314}
6315namespace symint {
6316 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6317 at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6318 return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out);
6319 }
6320}
6321
6322// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6323inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
6324 return at::_ops::randn_generator_out::call(size, generator, out);
6325}
6326namespace symint {
6327 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6328 at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator) {
6329 return at::_ops::randn_generator_out::call(size, generator, out);
6330 }
6331}
6332
6333// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6334inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6335 return at::_ops::randn_generator_out::call(size, generator, out);
6336}
6337namespace symint {
6338 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6339 at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
6340 return at::_ops::randn_generator_out::call(size, generator, out);
6341 }
6342}
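
// Illustrative usage (not part of the generated header): filling a
// preallocated buffer with the generator-aware out-variant (assumes the
// at::detail::createCPUGenerator helper for a seeded CPU generator):
//
//   at::Tensor buf = at::empty({8});
//   at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/7);
//   at::randn_out(buf, {8}, gen);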
6343
6344// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
6345inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
6346 return at::_ops::randn_like::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
6347}
6348// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
6349inline at::Tensor randn_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
6350 return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format);
6351}
6352
6353// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6354inline at::Tensor randperm(int64_t n, at::TensorOptions options=at::kLong) {
6355 return at::_ops::randperm::call(n, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6356}
6357// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6358inline at::Tensor randperm(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6359 return at::_ops::randperm::call(n, dtype, layout, device, pin_memory);
6360}
6361
6362// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6363inline at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
6364 return at::_ops::randperm_generator::call(n, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6365}
6366// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6367inline at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6368 return at::_ops::randperm_generator::call(n, generator, dtype, layout, device, pin_memory);
6369}
6370
6371// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
6372inline at::Tensor & randperm_out(at::Tensor & out, int64_t n) {
6373 return at::_ops::randperm_out::call(n, out);
6374}
6375// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
6376inline at::Tensor & randperm_outf(int64_t n, at::Tensor & out) {
6377 return at::_ops::randperm_out::call(n, out);
6378}
6379
6380// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6381inline at::Tensor & randperm_out(at::Tensor & out, int64_t n, c10::optional<at::Generator> generator) {
6382 return at::_ops::randperm_generator_out::call(n, generator, out);
6383}
6384// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
6385inline at::Tensor & randperm_outf(int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) {
6386 return at::_ops::randperm_generator_out::call(n, generator, out);
6387}
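
// Illustrative usage (not part of the generated header): randperm defaults to
// kLong (see the dtype=long default above), so the result can be used
// directly as an index tensor:
//
//   at::Tensor perm = at::randperm(10);             // shuffled 0..9
//   at::Tensor data = at::randn({10, 4});
//   at::Tensor shuffled = data.index_select(0, perm);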
6388
6389// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6390inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step=1, at::TensorOptions options={}) {
6391 return at::_ops::range_step::call(start, end, step, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6392}
6393// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6394inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6395 return at::_ops::range_step::call(start, end, step, dtype, layout, device, pin_memory);
6396}
6397
6398// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6399inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}) {
6400 return at::_ops::range::call(start, end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
6401}
6402// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
6403inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
6404 return at::_ops::range::call(start, end, dtype, layout, device, pin_memory);
6405}
6406
6407// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
6408inline at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end) {
6409 return at::_ops::range_out_::call(start, end, out);
6410}
6411// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
6412inline at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {
6413 return at::_ops::range_out_::call(start, end, out);
6414}
6415
6416// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
6417inline at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
6418 return at::_ops::range_out::call(start, end, step, out);
6419}
6420// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
6421inline at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
6422 return at::_ops::range_out::call(start, end, step, out);
6423}
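
// Illustrative usage (not part of the generated header): unlike at::arange,
// at::range includes `end` when it falls on the step grid:
//
//   at::Tensor r = at::range(0, 10, 2, at::TensorOptions().dtype(at::kFloat));
//   // r: [0, 2, 4, 6, 8, 10]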
6424
6425// aten::ravel(Tensor(a) self) -> Tensor(a)
6426inline at::Tensor ravel(const at::Tensor & self) {
6427 return at::_ops::ravel::call(self);
6428}
6429
6430// aten::reciprocal(Tensor self) -> Tensor
6431inline at::Tensor reciprocal(const at::Tensor & self) {
6432 return at::_ops::reciprocal::call(self);
6433}
6434
6435// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
6436inline at::Tensor & reciprocal_(at::Tensor & self) {
6437 return at::_ops::reciprocal_::call(self);
6438}
6439
6440// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6441inline at::Tensor & reciprocal_out(at::Tensor & out, const at::Tensor & self) {
6442 return at::_ops::reciprocal_out::call(self, out);
6443}
6444// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6445inline at::Tensor & reciprocal_outf(const at::Tensor & self, at::Tensor & out) {
6446 return at::_ops::reciprocal_out::call(self, out);
6447}
6448
6449// aten::neg(Tensor self) -> Tensor
6450inline at::Tensor neg(const at::Tensor & self) {
6451 return at::_ops::neg::call(self);
6452}
6453
6454// aten::neg_(Tensor(a!) self) -> Tensor(a!)
6455inline at::Tensor & neg_(at::Tensor & self) {
6456 return at::_ops::neg_::call(self);
6457}
6458
6459// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6460inline at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self) {
6461 return at::_ops::neg_out::call(self, out);
6462}
6463// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6464inline at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out) {
6465 return at::_ops::neg_out::call(self, out);
6466}
6467
6468// aten::negative(Tensor self) -> Tensor
6469inline at::Tensor negative(const at::Tensor & self) {
6470 return at::_ops::negative::call(self);
6471}
6472
6473// aten::negative_(Tensor(a!) self) -> Tensor(a!)
6474inline at::Tensor & negative_(at::Tensor & self) {
6475 return at::_ops::negative_::call(self);
6476}
6477
6478// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6479inline at::Tensor & negative_out(at::Tensor & out, const at::Tensor & self) {
6480 return at::_ops::negative_out::call(self, out);
6481}
6482// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6483inline at::Tensor & negative_outf(const at::Tensor & self, at::Tensor & out) {
6484 return at::_ops::negative_out::call(self, out);
6485}
6486
6487namespace symint {
6488 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6489 at::Tensor repeat(const at::Tensor & self, at::IntArrayRef repeats) {
6490 return at::_ops::repeat::call(self, c10::fromIntArrayRefSlow(repeats));
6491 }
6492}
6493
6494namespace symint {
6495 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6496 at::Tensor repeat(const at::Tensor & self, c10::SymIntArrayRef repeats) {
6497 return at::_ops::repeat::call(self, repeats);
6498 }
6499}
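
// Illustrative usage (not part of the generated header): the at::symint
// namespace lets generic code select the int64_t or c10::SymInt overload
// through a template parameter instead of a different function name:
//
//   at::Tensor t = at::ones({2});
//   at::Tensor a = at::symint::repeat<int64_t>(t, {2, 2});
//   // given a c10::SymIntArrayRef sym_sizes in scope:
//   //   at::Tensor b = at::symint::repeat<c10::SymInt>(t, sym_sizes);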
6500
6501// aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
6502inline at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional<int64_t> output_size=c10::nullopt) {
6503 return at::_ops::repeat_interleave_Tensor::call(repeats, output_size);
6504}
6505
6506// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
6507inline at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) {
6508 return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size);
6509}
6510
6511// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
6512inline at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) {
6513 return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
6514}
6515namespace symint {
6516 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6517 at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) {
6518 return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
6519 }
6520}
6521
6522// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
6523inline at::Tensor repeat_interleave_symint(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) {
6524 return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
6525}
6526namespace symint {
6527 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6528 at::Tensor repeat_interleave(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) {
6529 return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
6530 }
6531}
6532
6533// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
6534inline at::Tensor reshape(const at::Tensor & self, at::IntArrayRef shape) {
6535 return at::_ops::reshape::call(self, c10::fromIntArrayRefSlow(shape));
6536}
6537namespace symint {
6538 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6539 at::Tensor reshape(const at::Tensor & self, at::IntArrayRef shape) {
6540 return at::_ops::reshape::call(self, c10::fromIntArrayRefSlow(shape));
6541 }
6542}
6543
6544// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
6545inline at::Tensor reshape_symint(const at::Tensor & self, c10::SymIntArrayRef shape) {
6546 return at::_ops::reshape::call(self, shape);
6547}
6548namespace symint {
6549 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6550 at::Tensor reshape(const at::Tensor & self, c10::SymIntArrayRef shape) {
6551 return at::_ops::reshape::call(self, shape);
6552 }
6553}
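
// Illustrative usage (not part of the generated header): reshape returns a
// view when strides permit and a copy otherwise; -1 infers one dimension:
//
//   at::Tensor m = at::randn({4, 6});
//   at::Tensor v = at::reshape(m, {2, -1});   // shape [2, 12]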
6554
6555// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
6556inline at::Tensor _reshape_copy(const at::Tensor & self, at::IntArrayRef size) {
6557 return at::_ops::_reshape_copy::call(self, c10::fromIntArrayRefSlow(size));
6558}
6559namespace symint {
6560 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6561 at::Tensor _reshape_copy(const at::Tensor & self, at::IntArrayRef size) {
6562 return at::_ops::_reshape_copy::call(self, c10::fromIntArrayRefSlow(size));
6563 }
6564}
6565
6566// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor
6567inline at::Tensor _reshape_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
6568 return at::_ops::_reshape_copy::call(self, size);
6569}
6570namespace symint {
6571 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6572 at::Tensor _reshape_copy(const at::Tensor & self, c10::SymIntArrayRef size) {
6573 return at::_ops::_reshape_copy::call(self, size);
6574 }
6575}
6576
6577// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
6578inline at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
6579 return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
6580}
6581namespace symint {
6582 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6583 at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
6584 return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
6585 }
6586}
6587
6588// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
6589inline at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
6590 return at::_ops::_reshape_alias::call(self, size, stride);
6591}
6592namespace symint {
6593 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6594 at::Tensor _reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
6595 return at::_ops::_reshape_alias::call(self, size, stride);
6596 }
6597}
6598
6599// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
6600inline at::Tensor _mkldnn_reshape(const at::Tensor & self, at::IntArrayRef shape) {
6601 return at::_ops::_mkldnn_reshape::call(self, shape);
6602}
6603
6604// aten::round(Tensor self) -> Tensor
6605inline at::Tensor round(const at::Tensor & self) {
6606 return at::_ops::round::call(self);
6607}
6608
6609// aten::round_(Tensor(a!) self) -> Tensor(a!)
6610inline at::Tensor & round_(at::Tensor & self) {
6611 return at::_ops::round_::call(self);
6612}
6613
6614// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6615inline at::Tensor & round_out(at::Tensor & out, const at::Tensor & self) {
6616 return at::_ops::round_out::call(self, out);
6617}
6618// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6619inline at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out) {
6620 return at::_ops::round_out::call(self, out);
6621}
6622
6623// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
6624inline at::Tensor round(const at::Tensor & self, int64_t decimals) {
6625 return at::_ops::round_decimals::call(self, decimals);
6626}
6627
6628// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
6629inline at::Tensor & round_(at::Tensor & self, int64_t decimals) {
6630 return at::_ops::round__decimals::call(self, decimals);
6631}
6632
6633// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
6634inline at::Tensor & round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals) {
6635 return at::_ops::round_decimals_out::call(self, decimals, out);
6636}
6637// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
6638inline at::Tensor & round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
6639 return at::_ops::round_decimals_out::call(self, decimals, out);
6640}
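
// Illustrative usage (not part of the generated header): the decimals
// overload rounds to a fixed number of decimal places instead of to the
// nearest integer:
//
//   at::Tensor x = at::full({1}, 3.14159);
//   at::Tensor r0 = at::round(x);        // 3.0
//   at::Tensor r2 = at::round(x, 2);     // 3.14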
6641
6642// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
6643inline at::Tensor rrelu(const at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
6644 return at::_ops::rrelu::call(self, lower, upper, training, generator);
6645}
6646
6647// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
6648inline at::Tensor & rrelu_(at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
6649 return at::_ops::rrelu_::call(self, lower, upper, training, generator);
6650}
6651
6652// aten::relu(Tensor self) -> Tensor
6653inline at::Tensor relu(const at::Tensor & self) {
6654 return at::_ops::relu::call(self);
6655}
6656
6657// aten::relu_(Tensor(a!) self) -> Tensor(a!)
6658inline at::Tensor & relu_(at::Tensor & self) {
6659 return at::_ops::relu_::call(self);
6660}
6661
6662// aten::relu6(Tensor self) -> Tensor
6663inline at::Tensor relu6(const at::Tensor & self) {
6664 return at::_ops::relu6::call(self);
6665}
6666
6667// aten::relu6_(Tensor(a!) self) -> Tensor(a!)
6668inline at::Tensor & relu6_(at::Tensor & self) {
6669 return at::_ops::relu6_::call(self);
6670}
6671
6672// aten::prelu(Tensor self, Tensor weight) -> Tensor
6673inline at::Tensor prelu(const at::Tensor & self, const at::Tensor & weight) {
6674 return at::_ops::prelu::call(self, weight);
6675}
6676
6677// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
6678inline at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
6679 return at::_ops::_prelu_kernel::call(self, weight);
6680}
6681
6682// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
6683inline ::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
6684 return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight);
6685}
6686
6687// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
6688inline at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none") {
6689 return at::_ops::gelu_out::call(self, approximate, out);
6690}
6691// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
6692inline at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
6693 return at::_ops::gelu_out::call(self, approximate, out);
6694}
6695
6696// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
6697inline at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none") {
6698 return at::_ops::gelu_::call(self, approximate);
6699}
6700
6701// aten::gelu(Tensor self, *, str approximate='none') -> Tensor
6702inline at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none") {
6703 return at::_ops::gelu::call(self, approximate);
6704}
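
// Illustrative usage (not part of the generated header): `approximate`
// selects between the exact erf-based GELU ("none") and the tanh
// approximation ("tanh"):
//
//   at::Tensor x = at::randn({5});
//   at::Tensor g_exact = at::gelu(x);            // approximate="none"
//   at::Tensor g_tanh  = at::gelu(x, "tanh");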
6705
6706// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
6707inline at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") {
6708 return at::_ops::gelu_backward_grad_input::call(grad_output, self, approximate, grad_input);
6709}
6710// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
6711inline at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
6712 return at::_ops::gelu_backward_grad_input::call(grad_output, self, approximate, grad_input);
6713}
6714
6715// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
6716inline at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") {
6717 return at::_ops::gelu_backward::call(grad_output, self, approximate);
6718}
6719
6720// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
6721inline at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self) {
6722 return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self);
6723}
6724
6725// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
6726inline at::Tensor & hardshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) {
6727 return at::_ops::hardshrink_out::call(self, lambd, out);
6728}
6729// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
6730inline at::Tensor & hardshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
6731 return at::_ops::hardshrink_out::call(self, lambd, out);
6732}
6733
6734// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
6735inline at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5) {
6736 return at::_ops::hardshrink::call(self, lambd);
6737}
6738
6739// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
6740inline at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
6741 return at::_ops::hardshrink_backward_grad_input::call(grad_out, self, lambd, grad_input);
6742}
6743// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
6744inline at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
6745 return at::_ops::hardshrink_backward_grad_input::call(grad_out, self, lambd, grad_input);
6746}
6747
6748// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
6749inline at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
6750 return at::_ops::hardshrink_backward::call(grad_out, self, lambd);
6751}
6752
6753// aten::rsqrt(Tensor self) -> Tensor
6754inline at::Tensor rsqrt(const at::Tensor & self) {
6755 return at::_ops::rsqrt::call(self);
6756}
6757
6758// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
6759inline at::Tensor & rsqrt_(at::Tensor & self) {
6760 return at::_ops::rsqrt_::call(self);
6761}
6762
6763// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6764inline at::Tensor & rsqrt_out(at::Tensor & out, const at::Tensor & self) {
6765 return at::_ops::rsqrt_out::call(self, out);
6766}
6767// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6768inline at::Tensor & rsqrt_outf(const at::Tensor & self, at::Tensor & out) {
6769 return at::_ops::rsqrt_out::call(self, out);
6770}
6771
6772// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
6773inline at::Tensor select(const at::Tensor & self, at::Dimname dim, int64_t index) {
6774 return at::_ops::select_Dimname::call(self, dim, index);
6775}
6776
6777// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
6778inline at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) {
6779 return at::_ops::select_int::call(self, dim, index);
6780}
6781namespace symint {
6782 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6783 at::Tensor select(const at::Tensor & self, int64_t dim, int64_t index) {
6784 return at::_ops::select_int::call(self, dim, index);
6785 }
6786}
6787
6788// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
6789inline at::Tensor select_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
6790 return at::_ops::select_int::call(self, dim, index);
6791}
6792namespace symint {
6793 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6794 at::Tensor select(const at::Tensor & self, int64_t dim, c10::SymInt index) {
6795 return at::_ops::select_int::call(self, dim, index);
6796 }
6797}
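
// Illustrative usage (not part of the generated header): select removes the
// indexed dimension, returning a view one rank lower:
//
//   at::Tensor m = at::arange(12).reshape({3, 4});
//   at::Tensor row1 = at::select(m, /*dim=*/0, /*index=*/1);   // shape [4]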
6798
6799// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
6800inline at::Tensor select_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
6801 return at::_ops::select_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index);
6802}
6803namespace symint {
6804 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6805 at::Tensor select_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
6806 return at::_ops::select_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index);
6807 }
6808}
6809
6810// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
6811inline at::Tensor select_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
6812 return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
6813}
6814namespace symint {
6815 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6816 at::Tensor select_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
6817 return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
6818 }
6819}
6820
6821// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
6822inline at::Tensor _nested_select_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, int64_t index) {
6823 return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
6824}
6825namespace symint {
6826 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
6827 at::Tensor _nested_select_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, int64_t index) {
6828 return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
6829 }
6830}
6831
6832// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor
6833inline at::Tensor _nested_select_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
6834 return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
6835}
6836namespace symint {
6837 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
6838 at::Tensor _nested_select_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
6839 return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
6840 }
6841}
6842
6843// aten::selu(Tensor self) -> Tensor
6844inline at::Tensor selu(const at::Tensor & self) {
6845 return at::_ops::selu::call(self);
6846}
6847
6848// aten::selu_(Tensor(a!) self) -> Tensor(a!)
6849inline at::Tensor & selu_(at::Tensor & self) {
6850 return at::_ops::selu_::call(self);
6851}
6852
6853// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
6854inline at::Tensor celu(const at::Tensor & self, const at::Scalar & alpha=1.0) {
6855 return at::_ops::celu::call(self, alpha);
6856}
6857
6858// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
6859inline at::Tensor & celu_(at::Tensor & self, const at::Scalar & alpha=1.0) {
6860 return at::_ops::celu_::call(self, alpha);
6861}
6862
6863// aten::silu(Tensor self) -> Tensor
6864inline at::Tensor silu(const at::Tensor & self) {
6865 return at::_ops::silu::call(self);
6866}
6867
6868// aten::silu_(Tensor(a!) self) -> Tensor(a!)
6869inline at::Tensor & silu_(at::Tensor & self) {
6870 return at::_ops::silu_::call(self);
6871}
6872
6873// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6874inline at::Tensor & silu_out(at::Tensor & out, const at::Tensor & self) {
6875 return at::_ops::silu_out::call(self, out);
6876}
6877// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6878inline at::Tensor & silu_outf(const at::Tensor & self, at::Tensor & out) {
6879 return at::_ops::silu_out::call(self, out);
6880}
6881
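// Example (editorial sketch): throughout this header, each `_out` wrapper
// takes the output tensor first, while the matching `_outf` wrapper keeps
// the schema's argument order with `out` last; both write into the same
// buffer:
//
//   at::Tensor x = at::randn({4});
//   at::Tensor out = at::empty_like(x);
//   at::silu_out(out, x);   // out leads
//   at::silu_outf(x, out);  // schema order, out trails
//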
6882// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
6883inline at::Tensor & silu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
6884 return at::_ops::silu_backward_grad_input::call(grad_output, self, grad_input);
6885}
6886// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
6887inline at::Tensor & silu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
6888 return at::_ops::silu_backward_grad_input::call(grad_output, self, grad_input);
6889}
6890
6891// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
6892inline at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self) {
6893 return at::_ops::silu_backward::call(grad_output, self);
6894}
6895
6896// aten::mish(Tensor self) -> Tensor
6897inline at::Tensor mish(const at::Tensor & self) {
6898 return at::_ops::mish::call(self);
6899}
6900
6901// aten::mish_(Tensor(a!) self) -> Tensor(a!)
6902inline at::Tensor & mish_(at::Tensor & self) {
6903 return at::_ops::mish_::call(self);
6904}
6905
6906// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6907inline at::Tensor & mish_out(at::Tensor & out, const at::Tensor & self) {
6908 return at::_ops::mish_out::call(self, out);
6909}
6910// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6911inline at::Tensor & mish_outf(const at::Tensor & self, at::Tensor & out) {
6912 return at::_ops::mish_out::call(self, out);
6913}
6914
6915// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
6916inline at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
6917 return at::_ops::mish_backward::call(grad_output, self);
6918}
6919
6920// aten::sigmoid(Tensor self) -> Tensor
6921inline at::Tensor sigmoid(const at::Tensor & self) {
6922 return at::_ops::sigmoid::call(self);
6923}
6924
6925// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
6926inline at::Tensor & sigmoid_(at::Tensor & self) {
6927 return at::_ops::sigmoid_::call(self);
6928}
6929
6930// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6931inline at::Tensor & sigmoid_out(at::Tensor & out, const at::Tensor & self) {
6932 return at::_ops::sigmoid_out::call(self, out);
6933}
6934// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6935inline at::Tensor & sigmoid_outf(const at::Tensor & self, at::Tensor & out) {
6936 return at::_ops::sigmoid_out::call(self, out);
6937}
6938
6939// aten::logit(Tensor self, float? eps=None) -> Tensor
6940inline at::Tensor logit(const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
6941 return at::_ops::logit::call(self, eps);
6942}
6943
6944// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
6945inline at::Tensor & logit_(at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
6946 return at::_ops::logit_::call(self, eps);
6947}
6948
6949// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
6950inline at::Tensor & logit_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
6951 return at::_ops::logit_out::call(self, eps, out);
6952}
6953// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
6954inline at::Tensor & logit_outf(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
6955 return at::_ops::logit_out::call(self, eps, out);
6956}
6957
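// Example (editorial sketch): logit is the inverse of sigmoid; eps, when
// given, clamps the input to [eps, 1 - eps] before taking log(p / (1 - p)):
//
//   at::Tensor p = at::sigmoid(at::randn({3}));
//   at::Tensor z = at::logit(p, /*eps=*/1e-6);  // approximately the pre-sigmoid values
//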
6958// aten::sin(Tensor self) -> Tensor
6959inline at::Tensor sin(const at::Tensor & self) {
6960 return at::_ops::sin::call(self);
6961}
6962
6963// aten::sin_(Tensor(a!) self) -> Tensor(a!)
6964inline at::Tensor & sin_(at::Tensor & self) {
6965 return at::_ops::sin_::call(self);
6966}
6967
6968// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6969inline at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) {
6970 return at::_ops::sin_out::call(self, out);
6971}
6972// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6973inline at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) {
6974 return at::_ops::sin_out::call(self, out);
6975}
6976
6977// aten::sinc(Tensor self) -> Tensor
6978inline at::Tensor sinc(const at::Tensor & self) {
6979 return at::_ops::sinc::call(self);
6980}
6981
6982// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
6983inline at::Tensor & sinc_(at::Tensor & self) {
6984 return at::_ops::sinc_::call(self);
6985}
6986
6987// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6988inline at::Tensor & sinc_out(at::Tensor & out, const at::Tensor & self) {
6989 return at::_ops::sinc_out::call(self, out);
6990}
6991// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
6992inline at::Tensor & sinc_outf(const at::Tensor & self, at::Tensor & out) {
6993 return at::_ops::sinc_out::call(self, out);
6994}
6995
6996// aten::sinh(Tensor self) -> Tensor
6997inline at::Tensor sinh(const at::Tensor & self) {
6998 return at::_ops::sinh::call(self);
6999}
7000
7001// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
7002inline at::Tensor & sinh_(at::Tensor & self) {
7003 return at::_ops::sinh_::call(self);
7004}
7005
7006// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7007inline at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self) {
7008 return at::_ops::sinh_out::call(self, out);
7009}
7010// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7011inline at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out) {
7012 return at::_ops::sinh_out::call(self, out);
7013}
7014
7015// aten::detach(Tensor(a) self) -> Tensor(a)
7016inline at::Tensor detach(const at::Tensor & self) {
7017 return at::_ops::detach::call(self);
7018}
7019
7020// aten::detach_(Tensor(a!) self) -> Tensor(a!)
7021inline at::Tensor & detach_(at::Tensor & self) {
7022 return at::_ops::detach_::call(self);
7023}
7024
7025// aten::size.int(Tensor self, int dim) -> int
7026inline int64_t __dispatch_size(const at::Tensor & self, int64_t dim) {
7027 return at::_ops::size_int::call(self, dim);
7028}
7029
7030// aten::size.Dimname(Tensor self, Dimname dim) -> int
7031inline int64_t size(const at::Tensor & self, at::Dimname dim) {
7032 return at::_ops::size_Dimname::call(self, dim);
7033}
7034
7035// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
7036inline at::Tensor slice(const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
7037 return at::_ops::slice_Tensor::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
7038}
7039namespace symint {
7040 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7041 at::Tensor slice(const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
7042 return at::_ops::slice_Tensor::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
7043 }
7044}
7045
7046// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
7047inline at::Tensor slice_symint(const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
7048 return at::_ops::slice_Tensor::call(self, dim, start, end, step);
7049}
7050namespace symint {
7051 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7052 at::Tensor slice(const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
7053 return at::_ops::slice_Tensor::call(self, dim, start, end, step);
7054 }
7055}
7056
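// Example (editorial sketch): slice views the half-open range [start, end)
// with the given step along dim, like Python's t[1:5:2]:
//
//   at::Tensor t = at::arange(8);
//   at::Tensor s = at::slice(t, /*dim=*/0, /*start=*/1, /*end=*/5, /*step=*/2);  // {1, 3}
//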
7057// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
7058inline at::Tensor slice_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
7059 return at::_ops::slice_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step);
7060}
7061namespace symint {
7062 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7063 at::Tensor slice_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
7064 return at::_ops::slice_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step);
7065 }
7066}
7067
7068// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
7069inline at::Tensor slice_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
7070 return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step);
7071}
7072namespace symint {
7073 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7074 at::Tensor slice_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
7075 return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step);
7076 }
7077}
7078
7079// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
7080inline at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
7081 return at::_ops::slice_scatter::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
7082}
7083namespace symint {
7084 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7085 at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
7086 return at::_ops::slice_scatter::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
7087 }
7088}
7089
7090// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
7091inline at::Tensor slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
7092 return at::_ops::slice_scatter::call(self, src, dim, start, end, step);
7093}
7094namespace symint {
7095 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7096 at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
7097 return at::_ops::slice_scatter::call(self, src, dim, start, end, step);
7098 }
7099}
7100
7101// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
7102inline at::Tensor select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
7103 return at::_ops::select_scatter::call(self, src, dim, index);
7104}
7105namespace symint {
7106 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7107 at::Tensor select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
7108 return at::_ops::select_scatter::call(self, src, dim, index);
7109 }
7110}
7111
7112// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
7113inline at::Tensor select_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
7114 return at::_ops::select_scatter::call(self, src, dim, index);
7115}
7116namespace symint {
7117 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7118 at::Tensor select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
7119 return at::_ops::select_scatter::call(self, src, dim, index);
7120 }
7121}
7122
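// Example (editorial sketch): the *_scatter ops are the functional duals of
// the corresponding views, returning a copy of self with src written where
// the view would have read:
//
//   at::Tensor base = at::zeros({2, 3});
//   at::Tensor src = at::ones({3});
//   at::Tensor r = at::select_scatter(base, src, /*dim=*/0, /*index=*/1);  // row 1 set to ones
//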
7123// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
7124inline at::Tensor diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
7125 return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2);
7126}
7127
7128// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
7129inline at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
7130 return at::_ops::as_strided_scatter::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
7131}
7132namespace symint {
7133 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7134 at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
7135 return at::_ops::as_strided_scatter::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
7136 }
7137}
7138
7139// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
7140inline at::Tensor as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
7141 return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
7142}
7143namespace symint {
7144 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7145 at::Tensor as_strided_scatter(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
7146 return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
7147 }
7148}
7149
7150// aten::smm(Tensor self, Tensor mat2) -> Tensor
7151inline at::Tensor smm(const at::Tensor & self, const at::Tensor & mat2) {
7152 return at::_ops::smm::call(self, mat2);
7153}
7154
7155// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
7156inline at::Tensor softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7157 return at::_ops::softmax_int::call(self, dim, dtype);
7158}
7159
7160// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
7161inline at::Tensor & softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7162 return at::_ops::softmax_int_out::call(self, dim, dtype, out);
7163}
7164// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
7165inline at::Tensor & softmax_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
7166 return at::_ops::softmax_int_out::call(self, dim, dtype, out);
7167}
7168
7169// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
7170inline at::Tensor softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7171 return at::_ops::softmax_Dimname::call(self, dim, dtype);
7172}
7173
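// Example (editorial sketch): softmax normalizes along one dimension so each
// slice sums to 1; the optional dtype upcasts the computation:
//
//   at::Tensor logits = at::randn({2, 5});
//   at::Tensor probs = at::softmax(logits, /*dim=*/-1, at::kFloat);
//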
7174// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
7175inline at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
7176 return at::_ops::_softmax::call(self, dim, half_to_float);
7177}
7178
7179// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
7180inline at::Tensor & _softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
7181 return at::_ops::_softmax_out::call(self, dim, half_to_float, out);
7182}
7183// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
7184inline at::Tensor & _softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
7185 return at::_ops::_softmax_out::call(self, dim, half_to_float, out);
7186}
7187
7188// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
7189inline at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
7190 return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype);
7191}
7192
7193// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
7194inline at::Tensor & _softmax_backward_data_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
7195 return at::_ops::_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, grad_input);
7196}
7197// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
7198inline at::Tensor & _softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
7199 return at::_ops::_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, grad_input);
7200}
7201
7202// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
7203inline ::std::vector<at::Tensor> unsafe_split(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
7204 return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
7205}
7206namespace symint {
7207 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7208 ::std::vector<at::Tensor> unsafe_split(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
7209 return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
7210 }
7211}
7212
7213// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
7214inline ::std::vector<at::Tensor> unsafe_split_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
7215 return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
7216}
7217namespace symint {
7218 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7219 ::std::vector<at::Tensor> unsafe_split(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
7220 return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
7221 }
7222}
7223
7224// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
7225inline ::std::vector<at::Tensor> split(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
7226 return at::_ops::split_Tensor::call(self, split_size, dim);
7227}
7228namespace symint {
7229 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7230 ::std::vector<at::Tensor> split(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
7231 return at::_ops::split_Tensor::call(self, split_size, dim);
7232 }
7233}
7234
7235// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
7236inline ::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
7237 return at::_ops::split_Tensor::call(self, split_size, dim);
7238}
7239namespace symint {
7240 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7241 ::std::vector<at::Tensor> split(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
7242 return at::_ops::split_Tensor::call(self, split_size, dim);
7243 }
7244}
7245
7246// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
7247inline ::std::vector<at::Tensor> split(const at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) {
7248 return at::_ops::split_sizes::call(self, c10::fromIntArrayRefSlow(split_size), dim);
7249}
7250namespace symint {
7251 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7252 ::std::vector<at::Tensor> split(const at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) {
7253 return at::_ops::split_sizes::call(self, c10::fromIntArrayRefSlow(split_size), dim);
7254 }
7255}
7256
7257// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
7258inline ::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim=0) {
7259 return at::_ops::split_sizes::call(self, split_size, dim);
7260}
7261namespace symint {
7262 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7263 ::std::vector<at::Tensor> split(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim=0) {
7264 return at::_ops::split_sizes::call(self, split_size, dim);
7265 }
7266}
7267
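// Example (editorial sketch): split.Tensor chunks by a fixed size (the last
// chunk may be smaller), while split.sizes takes explicit lengths that must
// sum to the extent of dim:
//
//   at::Tensor t = at::arange(10);
//   std::vector<at::Tensor> a = at::split(t, /*split_size=*/4);       // sizes 4, 4, 2
//   std::vector<at::Tensor> b = at::split(t, at::IntArrayRef{3, 7});  // sizes 3, 7
//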
7268// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
7269inline ::std::vector<at::Tensor> unsafe_split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
7270 return at::_ops::unsafe_split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
7271}
7272namespace symint {
7273 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7274 ::std::vector<at::Tensor> unsafe_split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
7275 return at::_ops::unsafe_split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
7276 }
7277}
7278
7279// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
7280inline ::std::vector<at::Tensor> unsafe_split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
7281 return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim);
7282}
7283namespace symint {
7284 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7285 ::std::vector<at::Tensor> unsafe_split_with_sizes(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
7286 return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim);
7287 }
7288}
7289
7290// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
7291inline ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
7292 return at::_ops::split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
7293}
7294namespace symint {
7295 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7296 ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
7297 return at::_ops::split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
7298 }
7299}
7300
7301// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
7302inline ::std::vector<at::Tensor> split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
7303 return at::_ops::split_with_sizes::call(self, split_sizes, dim);
7304}
7305namespace symint {
7306 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7307 ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
7308 return at::_ops::split_with_sizes::call(self, split_sizes, dim);
7309 }
7310}
7311
7312// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
7313inline ::std::vector<at::Tensor> hsplit(const at::Tensor & self, int64_t sections) {
7314 return at::_ops::hsplit_int::call(self, sections);
7315}
7316
7317// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
7318inline ::std::vector<at::Tensor> hsplit(const at::Tensor & self, at::IntArrayRef indices) {
7319 return at::_ops::hsplit_array::call(self, indices);
7320}
7321
7322// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
7323inline ::std::vector<at::Tensor> vsplit(const at::Tensor & self, int64_t sections) {
7324 return at::_ops::vsplit_int::call(self, sections);
7325}
7326
7327// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
7328inline ::std::vector<at::Tensor> vsplit(const at::Tensor & self, at::IntArrayRef indices) {
7329 return at::_ops::vsplit_array::call(self, indices);
7330}
7331
7332// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
7333inline ::std::vector<at::Tensor> dsplit(const at::Tensor & self, int64_t sections) {
7334 return at::_ops::dsplit_int::call(self, sections);
7335}
7336
7337// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
7338inline ::std::vector<at::Tensor> dsplit(const at::Tensor & self, at::IntArrayRef indices) {
7339 return at::_ops::dsplit_array::call(self, indices);
7340}
7341
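// Example (editorial sketch): hsplit, vsplit, and dsplit mirror the NumPy
// helpers, splitting along dimensions 1, 0, and 2 respectively; the int
// overloads require the dimension to divide evenly:
//
//   at::Tensor m = at::arange(12).reshape({3, 4});
//   std::vector<at::Tensor> halves = at::hsplit(m, /*sections=*/2);  // two [3, 2] tensors
//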
7342// aten::squeeze(Tensor(a) self) -> Tensor(a)
7343inline at::Tensor squeeze(const at::Tensor & self) {
7344 return at::_ops::squeeze::call(self);
7345}
7346
7347// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
7348inline at::Tensor squeeze(const at::Tensor & self, int64_t dim) {
7349 return at::_ops::squeeze_dim::call(self, dim);
7350}
7351
7352// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
7353inline at::Tensor squeeze(const at::Tensor & self, at::Dimname dim) {
7354 return at::_ops::squeeze_dimname::call(self, dim);
7355}
7356
7357// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
7358inline at::Tensor squeeze(const at::Tensor & self, at::IntArrayRef dim) {
7359 return at::_ops::squeeze_dims::call(self, dim);
7360}
7361
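// Example (editorial sketch): squeeze drops size-1 dimensions; the dim
// overloads only remove the named dimension when its size is 1:
//
//   at::Tensor t = at::zeros({1, 3, 1});
//   at::Tensor a = at::squeeze(t);     // shape [3]
//   at::Tensor b = at::squeeze(t, 0);  // shape [3, 1]
//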
7362// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
7363inline at::Tensor sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
7364 return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha);
7365}
7366
7367// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
7368inline at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
7369 return at::_ops::sspaddmm_out::call(self, mat1, mat2, beta, alpha, out);
7370}
7371// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
7372inline at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
7373 return at::_ops::sspaddmm_out::call(self, mat1, mat2, beta, alpha, out);
7374}
7375
7376// aten::stack(Tensor[] tensors, int dim=0) -> Tensor
7377inline at::Tensor stack(at::TensorList tensors, int64_t dim=0) {
7378 return at::_ops::stack::call(tensors, dim);
7379}
7380
7381// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
7382inline at::Tensor & stack_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0) {
7383 return at::_ops::stack_out::call(tensors, dim, out);
7384}
7385// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
7386inline at::Tensor & stack_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
7387 return at::_ops::stack_out::call(tensors, dim, out);
7388}
7389
7390// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor
7391inline at::Tensor _stack(at::TensorList tensors, int64_t dim=0) {
7392 return at::_ops::_stack::call(tensors, dim);
7393}
7394
7395// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
7396inline at::Tensor & _stack_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0) {
7397 return at::_ops::_stack_out::call(tensors, dim, out);
7398}
7399// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
7400inline at::Tensor & _stack_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) {
7401 return at::_ops::_stack_out::call(tensors, dim, out);
7402}
7403
7404// aten::hstack(Tensor[] tensors) -> Tensor
7405inline at::Tensor hstack(at::TensorList tensors) {
7406 return at::_ops::hstack::call(tensors);
7407}
7408
7409// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
7410inline at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors) {
7411 return at::_ops::hstack_out::call(tensors, out);
7412}
7413// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
7414inline at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out) {
7415 return at::_ops::hstack_out::call(tensors, out);
7416}
7417
7418// aten::vstack(Tensor[] tensors) -> Tensor
7419inline at::Tensor vstack(at::TensorList tensors) {
7420 return at::_ops::vstack::call(tensors);
7421}
7422
7423// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
7424inline at::Tensor & vstack_out(at::Tensor & out, at::TensorList tensors) {
7425 return at::_ops::vstack_out::call(tensors, out);
7426}
7427// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
7428inline at::Tensor & vstack_outf(at::TensorList tensors, at::Tensor & out) {
7429 return at::_ops::vstack_out::call(tensors, out);
7430}
7431
7432// aten::dstack(Tensor[] tensors) -> Tensor
7433inline at::Tensor dstack(at::TensorList tensors) {
7434 return at::_ops::dstack::call(tensors);
7435}
7436
7437// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
7438inline at::Tensor & dstack_out(at::Tensor & out, at::TensorList tensors) {
7439 return at::_ops::dstack_out::call(tensors, out);
7440}
7441// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
7442inline at::Tensor & dstack_outf(at::TensorList tensors, at::Tensor & out) {
7443 return at::_ops::dstack_out::call(tensors, out);
7444}
7445
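// Example (editorial sketch): stack adds a new dimension, whereas hstack,
// vstack, and dstack concatenate along existing axes in the NumPy sense:
//
//   at::Tensor a = at::ones({3});
//   at::Tensor b = at::zeros({3});
//   at::Tensor s = at::stack({a, b});   // shape [2, 3]
//   at::Tensor h = at::hstack({a, b});  // shape [6]
//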
7446// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
7447inline at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided=c10::nullopt, c10::optional<bool> return_complex=c10::nullopt) {
7448 return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
7449}
7450
7451// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
7452inline at::Tensor stft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const c10::optional<at::Tensor> & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, c10::optional<bool> onesided=c10::nullopt, c10::optional<bool> return_complex=c10::nullopt) {
7453 return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
7454}
7455
7456// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
7457inline at::Tensor istft(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const c10::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, c10::optional<bool> onesided=c10::nullopt, c10::optional<int64_t> length=c10::nullopt, bool return_complex=false) {
7458 return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
7459}
7460
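// Example (editorial sketch; parameter choices are illustrative): stft and
// istft are approximate inverses under matching parameters, and recent
// releases expect return_complex to be set explicitly:
//
//   at::Tensor sig = at::randn({1, 4000});
//   at::Tensor win = at::hann_window(400);
//   at::Tensor spec = at::stft(sig, /*n_fft=*/400, /*hop_length=*/100,
//                              /*win_length=*/400, win, /*normalized=*/false,
//                              /*onesided=*/true, /*return_complex=*/true);
//   at::Tensor rec = at::istft(spec, /*n_fft=*/400, /*hop_length=*/100,
//                              /*win_length=*/400, win);
//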
7461// aten::stride.int(Tensor self, int dim) -> int
7462inline int64_t __dispatch_stride(const at::Tensor & self, int64_t dim) {
7463 return at::_ops::stride_int::call(self, dim);
7464}
7465
7466// aten::stride.Dimname(Tensor self, Dimname dim) -> int
7467inline int64_t stride(const at::Tensor & self, at::Dimname dim) {
7468 return at::_ops::stride_Dimname::call(self, dim);
7469}
7470
7471// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
7472inline at::Tensor sum(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7473 return at::_ops::sum::call(self, dtype);
7474}
7475
7476// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
7477inline at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7478 return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
7479}
7480
7481// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
7482inline at::Tensor sum(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7483 return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
7484}
7485
7486// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7487inline at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7488 return at::_ops::sum_IntList_out::call(self, dim, keepdim, dtype, out);
7489}
7490// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7491inline at::Tensor & sum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
7492 return at::_ops::sum_IntList_out::call(self, dim, keepdim, dtype, out);
7493}
7494
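// Example (editorial sketch): the dim_IntList overload reduces over the
// listed dimensions; keepdim retains them with size 1:
//
//   at::Tensor t = at::ones({2, 3});
//   at::Tensor total = at::sum(t);                                // scalar tensor, value 6
//   at::Tensor rows = at::sum(t, /*dim=*/{1}, /*keepdim=*/true);  // shape [2, 1]
//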
7495// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7496inline at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7497 return at::_ops::sum_DimnameList_out::call(self, dim, keepdim, dtype, out);
7498}
7499// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7500inline at::Tensor & sum_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
7501 return at::_ops::sum_DimnameList_out::call(self, dim, keepdim, dtype, out);
7502}
7503
7504// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
7505inline at::Tensor _nested_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) {
7506 return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
7507}
7508
7509// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
7510inline at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7511 return at::_ops::nansum::call(self, dim, keepdim, dtype);
7512}
7513
7514// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7515inline at::Tensor & nansum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7516 return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out);
7517}
7518// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7519inline at::Tensor & nansum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
7520 return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out);
7521}
7522
7523// aten::sqrt(Tensor self) -> Tensor
7524inline at::Tensor sqrt(const at::Tensor & self) {
7525 return at::_ops::sqrt::call(self);
7526}
7527
7528// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
7529inline at::Tensor & sqrt_(at::Tensor & self) {
7530 return at::_ops::sqrt_::call(self);
7531}
7532
7533// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7534inline at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self) {
7535 return at::_ops::sqrt_out::call(self, out);
7536}
7537// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7538inline at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out) {
7539 return at::_ops::sqrt_out::call(self, out);
7540}
7541
7542// aten::square(Tensor self) -> Tensor
7543inline at::Tensor square(const at::Tensor & self) {
7544 return at::_ops::square::call(self);
7545}
7546
7547// aten::square_(Tensor(a!) self) -> Tensor(a!)
7548inline at::Tensor & square_(at::Tensor & self) {
7549 return at::_ops::square_::call(self);
7550}
7551
7552// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7553inline at::Tensor & square_out(at::Tensor & out, const at::Tensor & self) {
7554 return at::_ops::square_out::call(self, out);
7555}
7556// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7557inline at::Tensor & square_outf(const at::Tensor & self, at::Tensor & out) {
7558 return at::_ops::square_out::call(self, out);
7559}
7560
7561// aten::std(Tensor self, bool unbiased=True) -> Tensor
7562inline at::Tensor std(const at::Tensor & self, bool unbiased) {
7563 return at::_ops::std::call(self, unbiased);
7564}
7565
7566// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
7567inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
7568 return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
7569}
7570
7571// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
7572inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
7573 return at::_ops::std_correction::call(self, dim, correction, keepdim);
7574}
7575
7576// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
7577inline ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, bool unbiased) {
7578 return at::_ops::std_mean::call(self, unbiased);
7579}
7580
7581// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
7582inline ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
7583 return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim);
7584}
7585
7586// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
7587inline ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
7588 return at::_ops::std_mean_correction::call(self, dim, correction, keepdim);
7589}
7590
7591// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
7592inline ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
7593 return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim);
7594}
7595
7596// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
7597inline ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
7598 return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim);
7599}
7600
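// Example (editorial sketch): the correction overloads generalize unbiased:
// correction=1 matches unbiased=true (Bessel's correction) and correction=0
// gives the biased estimator; std_mean computes both statistics in one pass:
//
//   at::Tensor t = at::randn({4, 5});
//   auto [sd, mu] = at::std_mean(t, /*dim=*/{1}, /*correction=*/1, /*keepdim=*/false);
//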
7601// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
7602inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
7603 return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
7604}
7605// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
7606inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
7607 return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
7608}
7609
7610// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
7611inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
7612 return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
7613}
7614// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
7615inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
7616 return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
7617}
7618
7619// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
7620inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
7621 return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
7622}
7623
7624// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
7625inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
7626 return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
7627}
7628// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
7629inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
7630 return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
7631}
7632
7633// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
7634inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
7635 return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
7636}
7637
7638// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
7639inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
7640 return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
7641}
7642// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
7643inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
7644 return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
7645}
7646
7647// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
7648inline at::Tensor prod(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7649 return at::_ops::prod::call(self, dtype);
7650}
7651
7652// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
7653inline at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7654 return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype);
7655}
7656
7657// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7658inline at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7659 return at::_ops::prod_int_out::call(self, dim, keepdim, dtype, out);
7660}
7661// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7662inline at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
7663 return at::_ops::prod_int_out::call(self, dim, keepdim, dtype, out);
7664}
7665
7666// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
7667inline at::Tensor prod(const at::Tensor & self, at::Dimname dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7668 return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype);
7669}
7670
7671// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7672inline at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
7673 return at::_ops::prod_Dimname_out::call(self, dim, keepdim, dtype, out);
7674}
7675// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
7676inline at::Tensor & prod_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
7677 return at::_ops::prod_Dimname_out::call(self, dim, keepdim, dtype, out);
7678}
7679
7680// aten::t(Tensor(a) self) -> Tensor(a)
7681inline at::Tensor t(const at::Tensor & self) {
7682 return at::_ops::t::call(self);
7683}
7684
7685// aten::tan(Tensor self) -> Tensor
7686inline at::Tensor tan(const at::Tensor & self) {
7687 return at::_ops::tan::call(self);
7688}
7689
7690// aten::tan_(Tensor(a!) self) -> Tensor(a!)
7691inline at::Tensor & tan_(at::Tensor & self) {
7692 return at::_ops::tan_::call(self);
7693}
7694
7695// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7696inline at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self) {
7697 return at::_ops::tan_out::call(self, out);
7698}
7699// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7700inline at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out) {
7701 return at::_ops::tan_out::call(self, out);
7702}
7703
7704// aten::tanh(Tensor self) -> Tensor
7705inline at::Tensor tanh(const at::Tensor & self) {
7706 return at::_ops::tanh::call(self);
7707}
7708
7709// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
7710inline at::Tensor & tanh_(at::Tensor & self) {
7711 return at::_ops::tanh_::call(self);
7712}
7713
7714// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7715inline at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
7716 return at::_ops::tanh_out::call(self, out);
7717}
7718// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7719inline at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
7720 return at::_ops::tanh_out::call(self, out);
7721}
7722
7723// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
7724inline at::Tensor tensordot(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
7725 return at::_ops::tensordot::call(self, other, dims_self, dims_other);
7726}
7727
7728// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
7729inline at::Tensor & tensordot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
7730 return at::_ops::tensordot_out::call(self, other, dims_self, dims_other, out);
7731}
7732// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
7733inline at::Tensor & tensordot_outf(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {
7734 return at::_ops::tensordot_out::call(self, other, dims_self, dims_other, out);
7735}
7736
7737// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
7738inline at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
7739 return at::_ops::threshold::call(self, threshold, value);
7740}
7741
7742// aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
7743inline at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
7744 return at::_ops::threshold_::call(self, threshold, value);
7745}
7746
7747// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
7748inline at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
7749 return at::_ops::threshold_out::call(self, threshold, value, out);
7750}
7751// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
7752inline at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
7753 return at::_ops::threshold_out::call(self, threshold, value, out);
7754}
7755
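// Example (editorial sketch): threshold replaces every element that is
// <= threshold with value; relu is the special case threshold=0, value=0:
//
//   at::Tensor x = at::randn({5});
//   at::Tensor y = at::threshold(x, /*threshold=*/0.0, /*value=*/0.0);  // same as at::relu(x)
//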
7756// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
7757inline at::Tensor & threshold_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
7758 return at::_ops::threshold_backward_grad_input::call(grad_output, self, threshold, grad_input);
7759}
7760// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
7761inline at::Tensor & threshold_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
7762 return at::_ops::threshold_backward_grad_input::call(grad_output, self, threshold, grad_input);
7763}
7764
7765// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
7766inline at::Tensor threshold_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
7767 return at::_ops::threshold_backward::call(grad_output, self, threshold);
7768}
7769
7770// aten::tile(Tensor self, int[] dims) -> Tensor
7771inline at::Tensor tile(const at::Tensor & self, at::IntArrayRef dims) {
7772 return at::_ops::tile::call(self, dims);
7773}
7774
7775// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
7776inline at::Tensor transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
7777 return at::_ops::transpose_int::call(self, dim0, dim1);
7778}
7779
7780// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
7781inline at::Tensor transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
7782 return at::_ops::transpose_Dimname::call(self, dim0, dim1);
7783}
7784
7785// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
7786inline at::Tensor _mkldnn_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
7787 return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
7788}
7789
7790// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
7791inline at::Tensor & _mkldnn_transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) {
7792 return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
7793}
7794
7795// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
7796inline at::Tensor one_hot(const at::Tensor & self, int64_t num_classes=-1) {
7797 return at::_ops::one_hot::call(self, num_classes);
7798}
7799
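// Example (editorial sketch): one_hot expects non-negative integral class
// indices; with num_classes=-1 the width is inferred from the largest index:
//
//   at::Tensor labels = at::arange(3, at::kLong);            // {0, 1, 2}
//   at::Tensor oh = at::one_hot(labels, /*num_classes=*/4);  // shape [3, 4]
//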
7800// aten::flip(Tensor self, int[] dims) -> Tensor
7801inline at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) {
7802 return at::_ops::flip::call(self, dims);
7803}
7804
7805// aten::fliplr(Tensor self) -> Tensor
7806inline at::Tensor fliplr(const at::Tensor & self) {
7807 return at::_ops::fliplr::call(self);
7808}
7809
7810// aten::flipud(Tensor self) -> Tensor
7811inline at::Tensor flipud(const at::Tensor & self) {
7812 return at::_ops::flipud::call(self);
7813}
7814
7815// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
7816inline at::Tensor roll(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) {
7817 return at::_ops::roll::call(self, shifts, dims);
7818}
7819
7820// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
7821inline at::Tensor rot90(const at::Tensor & self, int64_t k=1, at::IntArrayRef dims={0,1}) {
7822 return at::_ops::rot90::call(self, k, dims);
7823}
7824
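// Example (editorial sketch): flip reverses along the given dims, roll shifts
// with wraparound, and rot90 rotates in the plane spanned by dims:
//
//   at::Tensor m = at::arange(4, at::kLong).reshape({2, 2});  // [[0, 1], [2, 3]]
//   at::Tensor f = at::flip(m, {0});                          // [[2, 3], [0, 1]]
//   at::Tensor r = at::rot90(m, /*k=*/1);                     // [[1, 3], [0, 2]]
//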
7825// aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
7826inline at::Tensor trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
7827 return at::_ops::trapezoid_x::call(y, x, dim);
7828}
7829
7830// aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
7831inline at::Tensor trapezoid(const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) {
7832 return at::_ops::trapezoid_dx::call(y, dx, dim);
7833}
7834
7835// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
7836inline at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
7837 return at::_ops::trapz_x::call(y, x, dim);
7838}
7839
7840// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
7841inline at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1) {
7842 return at::_ops::trapz_dx::call(y, dx, dim);
7843}
7844
7845// aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
7846inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
7847 return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads);
7848}
7849
7850// aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
7851inline at::Tensor _nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check=true) {
7852 return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check);
7853}
7854
7855// aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
7856inline bool _nested_tensor_from_mask_left_aligned(const at::Tensor & t, const at::Tensor & mask) {
7857 return at::_ops::_nested_tensor_from_mask_left_aligned::call(t, mask);
7858}
7859
7860// aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
7861inline at::Tensor _nested_from_padded(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) {
7862 return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213);
7863}
7864
7865// aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
7866inline at::Tensor _nested_from_padded_and_nested_example(const at::Tensor & padded, const at::Tensor & nt_example) {
7867 return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example);
7868}
7869
7870// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
7871inline at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
7872 return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
7873}
7874
7875// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
7876inline at::Tensor _nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
7877 return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
7878}
7879
7880// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
7881inline at::Tensor _trilinear(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1) {
7882 return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
7883}
7884
7885// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
7886inline at::Tensor triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin=1.0, double p=2, double eps=1e-06, bool swap=false, int64_t reduction=at::Reduction::Mean) {
7887 return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction);
7888}
7889
7890// aten::trunc(Tensor self) -> Tensor
7891inline at::Tensor trunc(const at::Tensor & self) {
7892 return at::_ops::trunc::call(self);
7893}
7894
7895// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
7896inline at::Tensor & trunc_(at::Tensor & self) {
7897 return at::_ops::trunc_::call(self);
7898}
7899
7900// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7901inline at::Tensor & trunc_out(at::Tensor & out, const at::Tensor & self) {
7902 return at::_ops::trunc_out::call(self, out);
7903}
7904// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7905inline at::Tensor & trunc_outf(const at::Tensor & self, at::Tensor & out) {
7906 return at::_ops::trunc_out::call(self, out);
7907}
7908
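// Note the two out-variant spellings generated for most operators here:
// `*_out` takes the result tensor as the first argument, while `*_outf`
// keeps the schema order with `out` last. Both forward to the same op, so
// the choice is purely stylistic. A sketch (names illustrative):
//
//   at::Tensor src = at::rand({4});
//   at::Tensor dst = at::empty({4});
//   at::trunc_out(dst, src);   // out-first form
//   at::trunc_outf(src, dst);  // schema-order form; identical result
//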
7909// aten::fix(Tensor self) -> Tensor
7910inline at::Tensor fix(const at::Tensor & self) {
7911 return at::_ops::fix::call(self);
7912}
7913
7914// aten::fix_(Tensor(a!) self) -> Tensor(a!)
7915inline at::Tensor & fix_(at::Tensor & self) {
7916 return at::_ops::fix_::call(self);
7917}
7918
7919// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7920inline at::Tensor & fix_out(at::Tensor & out, const at::Tensor & self) {
7921 return at::_ops::fix_out::call(self, out);
7922}
7923// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
7924inline at::Tensor & fix_outf(const at::Tensor & self, at::Tensor & out) {
7925 return at::_ops::fix_out::call(self, out);
7926}
7927
7928// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
7929inline bool _has_compatible_shallow_copy_type(const at::Tensor & self, const at::Tensor & from) {
7930 return at::_ops::_has_compatible_shallow_copy_type::call(self, from);
7931}
7932
7933// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
7934inline ::std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted=true, bool return_inverse=false) {
7935 return at::_ops::_unique::call(self, sorted, return_inverse);
7936}
7937
7938// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
7939inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim(const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
7940 return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts);
7941}
7942
7943// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
7944inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive(const at::Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional<int64_t> dim=c10::nullopt) {
7945 return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
7946}
7947
7948// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
7949inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive(const at::Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false) {
7950 return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts);
7951}
7952
7953// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
7954inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2(const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
7955 return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
7956}
7957
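// A usage sketch for the unique family (input values are illustrative);
// structured bindings unpack the (values, inverse, counts) tuple:
//
//   at::Tensor t = at::tensor({1, 1, 2, 3, 3});
//   auto [vals, inverse, counts] =
//       at::_unique2(t, /*sorted=*/true, /*return_inverse=*/true, /*return_counts=*/true);
//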
7958// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
7959inline at::Tensor _unsafe_view(const at::Tensor & self, at::IntArrayRef size) {
7960 return at::_ops::_unsafe_view::call(self, c10::fromIntArrayRefSlow(size));
7961}
7962namespace symint {
7963 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
7964 at::Tensor _unsafe_view(const at::Tensor & self, at::IntArrayRef size) {
7965 return at::_ops::_unsafe_view::call(self, c10::fromIntArrayRefSlow(size));
7966 }
7967}
7968
7969// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
7970inline at::Tensor _unsafe_view_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
7971 return at::_ops::_unsafe_view::call(self, size);
7972}
7973namespace symint {
7974 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
7975 at::Tensor _unsafe_view(const at::Tensor & self, c10::SymIntArrayRef size) {
7976 return at::_ops::_unsafe_view::call(self, size);
7977 }
7978}
7979
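// The `at::symint::` templates above expose both entry points under one name,
// selected by a template parameter, so generic code can work with either
// concrete int64_t sizes or symbolic c10::SymInt sizes. A sketch (the wrapper
// name is illustrative):
//
//   template <typename T>
//   at::Tensor unsafe_reshape(const at::Tensor & t, c10::ArrayRef<T> shape) {
//     return at::symint::_unsafe_view<T>(t, shape);
//   }
//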
7980// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
7981inline at::Tensor unsqueeze(const at::Tensor & self, int64_t dim) {
7982 return at::_ops::unsqueeze::call(self, dim);
7983}
7984
7985// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
7986inline at::Tensor vander(const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt, bool increasing=false) {
7987 return at::_ops::vander::call(x, N, increasing);
7988}
7989
7990// aten::var(Tensor self, bool unbiased=True) -> Tensor
7991inline at::Tensor var(const at::Tensor & self, bool unbiased) {
7992 return at::_ops::var::call(self, unbiased);
7993}
7994
7995// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
7996inline at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
7997 return at::_ops::var_dim::call(self, dim, unbiased, keepdim);
7998}
7999
8000// aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
8001inline at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
8002 return at::_ops::var_correction::call(self, dim, correction, keepdim);
8003}
8004
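// A usage sketch for the correction overload (names are illustrative):
// `correction` generalizes the unbiased flag, e.g. correction=1 is Bessel's
// correction and correction=0 gives the population variance.
//
//   at::Tensor t = at::rand({2, 5});
//   int64_t dims[] = {1};
//   at::Tensor v = at::var(t, dims, c10::optional<int64_t>(1), /*keepdim=*/false);
//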
8005// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8006inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
8007 return at::_ops::var_out::call(self, dim, unbiased, keepdim, out);
8008}
8009// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8010inline at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
8011 return at::_ops::var_out::call(self, dim, unbiased, keepdim, out);
8012}
8013
8014// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
8015inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
8016 return at::_ops::var_correction_out::call(self, dim, correction, keepdim, out);
8017}
8018// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
8019inline at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
8020 return at::_ops::var_correction_out::call(self, dim, correction, keepdim, out);
8021}
8022
8023// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
8024inline at::Tensor var(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
8025 return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim);
8026}
8027
8028// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8029inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
8030 return at::_ops::var_names_out::call(self, dim, unbiased, keepdim, out);
8031}
8032// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8033inline at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
8034 return at::_ops::var_names_out::call(self, dim, unbiased, keepdim, out);
8035}
8036
8037// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
8038inline at::Tensor var(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
8039 return at::_ops::var_correction_names::call(self, dim, correction, keepdim);
8040}
8041
8042// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
8043inline at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
8044 return at::_ops::var_correction_names_out::call(self, dim, correction, keepdim, out);
8045}
8046// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
8047inline at::Tensor & var_outf(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
8048 return at::_ops::var_correction_names_out::call(self, dim, correction, keepdim, out);
8049}
8050
8051// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
8052inline ::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, bool unbiased) {
8053 return at::_ops::var_mean::call(self, unbiased);
8054}
8055
8056// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
8057inline ::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
8058 return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim);
8059}
8060
8061// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
8062inline ::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
8063 return at::_ops::var_mean_correction::call(self, dim, correction, keepdim);
8064}
8065
8066// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
8067inline ::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
8068 return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim);
8069}
8070
8071// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor)
8072inline ::std::tuple<at::Tensor,at::Tensor> var_mean(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
8073 return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim);
8074}
8075
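// A usage sketch (names illustrative): var_mean computes variance and mean in
// one pass; with no extra arguments the correction overload reduces over all
// dimensions.
//
//   at::Tensor t = at::rand({2, 5});
//   auto [v, m] = at::var_mean(t);
//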
8076// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
8077inline at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
8078 return at::_ops::where_self::call(condition, self, other);
8079}
8080
8081// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8082inline at::Tensor & where_out(at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
8083 return at::_ops::where_self_out::call(condition, self, other, out);
8084}
8085// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
8086inline at::Tensor & where_outf(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
8087 return at::_ops::where_self_out::call(condition, self, other, out);
8088}
8089
8090// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
8091inline at::Tensor where(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
8092 return at::_ops::where_ScalarSelf::call(condition, self, other);
8093}
8094
8095// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
8096inline at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
8097 return at::_ops::where_ScalarOther::call(condition, self, other);
8098}
8099
8100// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
8101inline at::Tensor where(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
8102 return at::_ops::where_Scalar::call(condition, self, other);
8103}
8104
8105// aten::where(Tensor condition) -> Tensor[]
8106inline ::std::vector<at::Tensor> where(const at::Tensor & condition) {
8107 return at::_ops::where::call(condition);
8108}
8109
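// A usage sketch for where (names illustrative): elements of `self` are taken
// where `condition` is true, elements of `other` elsewhere.
//
//   at::Tensor cond = at::rand({3}) > 0.5;
//   at::Tensor out = at::where(cond, at::ones({3}), at::zeros({3}));
//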
8110// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
8111inline at::Tensor norm_except_dim(const at::Tensor & v, int64_t pow=2, int64_t dim=0) {
8112 return at::_ops::norm_except_dim::call(v, pow, dim);
8113}
8114
8115// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
8116inline at::Tensor _weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim=0) {
8117 return at::_ops::_weight_norm::call(v, g, dim);
8118}
8119
8120// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
8121inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface(const at::Tensor & v, const at::Tensor & g, int64_t dim=0) {
8122 return at::_ops::_weight_norm_interface::call(v, g, dim);
8123}
8124
8125// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
8126inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
8127 return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
8128}
8129
8130// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
8131inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
8132 return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
8133}
8134
8135// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8136inline at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
8137 return at::_ops::zeros_names::call(size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8138}
8139// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8140inline at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8141 return at::_ops::zeros_names::call(size, names, dtype, layout, device, pin_memory);
8142}
8143
8144// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8145inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options={}) {
8146 return at::_ops::_efficientzerotensor::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8147}
8148// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8149inline at::Tensor _efficientzerotensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8150 return at::_ops::_efficientzerotensor::call(size, dtype, layout, device, pin_memory);
8151}
8152
8153// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8154inline at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options={}) {
8155 return at::_ops::zeros::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8156}
8157namespace symint {
8158 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8159 at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options={}) {
8160 return at::_ops::zeros::call(c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8161 }
8162}
8163
8164// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8165inline at::Tensor zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8166 return at::_ops::zeros::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
8167}
8168namespace symint {
8169 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8170 at::Tensor zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8171 return at::_ops::zeros::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
8172 }
8173}
8174
8175// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8176inline at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) {
8177 return at::_ops::zeros::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8178}
8179namespace symint {
8180 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8181 at::Tensor zeros(c10::SymIntArrayRef size, at::TensorOptions options={}) {
8182 return at::_ops::zeros::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8183 }
8184}
8185
8186// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8187inline at::Tensor zeros_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8188 return at::_ops::zeros::call(size, dtype, layout, device, pin_memory);
8189}
8190namespace symint {
8191 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8192 at::Tensor zeros(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8193 return at::_ops::zeros::call(size, dtype, layout, device, pin_memory);
8194 }
8195}
8196
8197// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
8198inline at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) {
8199 return at::_ops::zeros_out::call(c10::fromIntArrayRefSlow(size), out);
8200}
8201namespace symint {
8202 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8203 at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size) {
8204 return at::_ops::zeros_out::call(c10::fromIntArrayRefSlow(size), out);
8205 }
8206}
8207
8208// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
8209inline at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) {
8210 return at::_ops::zeros_out::call(c10::fromIntArrayRefSlow(size), out);
8211}
8212namespace symint {
8213 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8214 at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out) {
8215 return at::_ops::zeros_out::call(c10::fromIntArrayRefSlow(size), out);
8216 }
8217}
8218
8219// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
8220inline at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size) {
8221 return at::_ops::zeros_out::call(size, out);
8222}
8223namespace symint {
8224 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8225 at::Tensor & zeros_out(at::Tensor & out, c10::SymIntArrayRef size) {
8226 return at::_ops::zeros_out::call(size, out);
8227 }
8228}
8229
8230// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
8231inline at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) {
8232 return at::_ops::zeros_out::call(size, out);
8233}
8234namespace symint {
8235 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8236 at::Tensor & zeros_outf(c10::SymIntArrayRef size, at::Tensor & out) {
8237 return at::_ops::zeros_out::call(size, out);
8238 }
8239}
8240
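// A usage sketch for the zeros factory (dtypes chosen for illustration); a
// bare ScalarType converts implicitly to TensorOptions:
//
//   at::Tensor z  = at::zeros({2, 3}, at::TensorOptions().dtype(at::kFloat));
//   at::Tensor zi = at::zeros({4}, at::kLong);
//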
8241// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
8242inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
8243 return at::_ops::zeros_like::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
8244}
8245// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
8246inline at::Tensor zeros_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
8247 return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
8248}
8249
8250// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
8251inline at::Tensor _standard_gamma_grad(const at::Tensor & self, const at::Tensor & output) {
8252 return at::_ops::_standard_gamma_grad::call(self, output);
8253}
8254
8255// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
8256inline at::Tensor _standard_gamma(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
8257 return at::_ops::_standard_gamma::call(self, generator);
8258}
8259
8260// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
8261inline at::Tensor _dirichlet_grad(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
8262 return at::_ops::_dirichlet_grad::call(x, alpha, total);
8263}
8264
8265// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
8266inline at::Tensor _sample_dirichlet(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
8267 return at::_ops::_sample_dirichlet::call(self, generator);
8268}
8269
8270// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
8271inline at::Tensor poisson(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
8272 return at::_ops::poisson::call(self, generator);
8273}
8274
8275// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
8276inline at::Tensor binomial(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt) {
8277 return at::_ops::binomial::call(count, prob, generator);
8278}
8279
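// A usage sketch for the sampling ops above (rates are illustrative); each
// draws one sample per element, optionally from an explicit generator:
//
//   at::Tensor rate = at::rand({4}) * 5;
//   at::Tensor draws = at::poisson(rate);
//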
8280// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
8281inline at::Tensor native_norm(const at::Tensor & self, const at::Scalar & p=2) {
8282 return at::_ops::native_norm::call(self, p);
8283}
8284
8285// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
8286inline at::Tensor native_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
8287 return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
8288}
8289
8290// aten::_sparse_sum(Tensor self) -> Tensor
8291inline at::Tensor _sparse_sum(const at::Tensor & self) {
8292 return at::_ops::_sparse_sum::call(self);
8293}
8294
8295// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
8296inline at::Tensor _sparse_sum(const at::Tensor & self, at::ScalarType dtype) {
8297 return at::_ops::_sparse_sum_dtype::call(self, dtype);
8298}
8299
8300// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
8301inline at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim) {
8302 return at::_ops::_sparse_sum_dim::call(self, dim);
8303}
8304
8305// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
8306inline at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
8307 return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype);
8308}
8309
8310// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
8311inline at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
8312 return at::_ops::_sparse_sum_backward::call(grad, self, dim);
8313}
8314
8315// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
8316inline at::Tensor _sparse_csr_sum(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
8317 return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
8318}
8319
8320// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
8321inline at::Tensor _sparse_csr_prod(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
8322 return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
8323}
8324
8325// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
8326inline at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
8327 return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
8328}
8329
8330// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
8331inline at::Tensor _sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
8332 return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
8333}
8334
8335// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
8336inline at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
8337 return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
8338}
8339
8340// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
8341inline at::Tensor _sparse_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
8342 return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self);
8343}
8344
8345// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
8346inline at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
8347 return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
8348}
8349
8350// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
8351inline at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
8352 return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
8353}
8354
8355// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
8356inline at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
8357 return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
8358}
8359
8360// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
8361inline at::Tensor _sparse_log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
8362 return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
8363}
8364
8365// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
8366inline at::Tensor _spdiags(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout=c10::nullopt) {
8367 return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
8368}
8369
8370// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
8371inline at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
8372 return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype);
8373}
8374
8375// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
8376inline at::Tensor norm(const at::Tensor & self, const at::Scalar & p=2) {
8377 return at::_ops::norm_Scalar::call(self, p);
8378}
8379
8380// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
8381inline at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
8382 return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
8383}
8384
8385// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
8386inline at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false) {
8387 return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim);
8388}
8389
8390// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
8391inline at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
8392 return at::_ops::norm_dtype_out::call(self, p, dim, keepdim, dtype, out);
8393}
8394// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
8395inline at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
8396 return at::_ops::norm_dtype_out::call(self, p, dim, keepdim, dtype, out);
8397}
8398
8399// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8400inline at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false) {
8401 return at::_ops::norm_out::call(self, p, dim, keepdim, out);
8402}
8403// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8404inline at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
8405 return at::_ops::norm_out::call(self, p, dim, keepdim, out);
8406}
8407
8408// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
8409inline at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
8410 return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
8411}
8412
8413// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
8414inline at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false) {
8415 return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim);
8416}
8417
8418// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
8419inline at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
8420 return at::_ops::norm_names_dtype_out::call(self, p, dim, keepdim, dtype, out);
8421}
8422// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
8423inline at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
8424 return at::_ops::norm_names_dtype_out::call(self, p, dim, keepdim, dtype, out);
8425}
8426
8427// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8428inline at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false) {
8429 return at::_ops::norm_names_out::call(self, p, dim, keepdim, out);
8430}
8431// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8432inline at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
8433 return at::_ops::norm_names_out::call(self, p, dim, keepdim, out);
8434}
8435
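// A usage sketch for the norm overloads (names illustrative): `p` is passed
// as an optional Scalar, so plain integers work directly.
//
//   at::Tensor m = at::rand({3, 4});
//   int64_t dims[] = {1};
//   at::Tensor l1 = at::norm(m, 1, dims);  // L1 norm of each row
//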
8436// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
8437inline ::std::tuple<at::Tensor,at::Tensor> frexp(const at::Tensor & self) {
8438 return at::_ops::frexp_Tensor::call(self);
8439}
8440
8441// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
8442inline ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self) {
8443 return at::_ops::frexp_Tensor_out::call(self, mantissa, exponent);
8444}
8445// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
8446inline ::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
8447 return at::_ops::frexp_Tensor_out::call(self, mantissa, exponent);
8448}
8449
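// A usage sketch (values illustrative): frexp decomposes each element so that
// self == mantissa * 2^exponent elementwise.
//
//   at::Tensor t = at::tensor({0.5, 6.0});
//   auto [mantissa, exponent] = at::frexp(t);
//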
8450// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
8451inline at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
8452 return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
8453}
8454
8455// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8456inline at::Tensor & frobenius_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
8457 return at::_ops::frobenius_norm_out::call(self, dim, keepdim, out);
8458}
8459// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8460inline at::Tensor & frobenius_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
8461 return at::_ops::frobenius_norm_out::call(self, dim, keepdim, out);
8462}
8463
8464// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
8465inline at::Tensor nuclear_norm(const at::Tensor & self, bool keepdim=false) {
8466 return at::_ops::nuclear_norm::call(self, keepdim);
8467}
8468
8469// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8470inline at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, bool keepdim=false) {
8471 return at::_ops::nuclear_norm_out::call(self, keepdim, out);
8472}
8473// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8474inline at::Tensor & nuclear_norm_outf(const at::Tensor & self, bool keepdim, at::Tensor & out) {
8475 return at::_ops::nuclear_norm_out::call(self, keepdim, out);
8476}
8477
8478// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
8479inline at::Tensor nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
8480 return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
8481}
8482
8483// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8484inline at::Tensor & nuclear_norm_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
8485 return at::_ops::nuclear_norm_dim_out::call(self, dim, keepdim, out);
8486}
8487// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
8488inline at::Tensor & nuclear_norm_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
8489 return at::_ops::nuclear_norm_dim_out::call(self, dim, keepdim, out);
8490}
8491
8492// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
8493inline at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
8494 return at::_ops::clone::call(self, memory_format);
8495}
8496
8497// aten::positive(Tensor(a) self) -> Tensor(a)
8498inline at::Tensor positive(const at::Tensor & self) {
8499 return at::_ops::positive::call(self);
8500}
8501
8502// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
8503inline const at::Tensor & resize_as_(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
8504 return at::_ops::resize_as_::call(self, the_template, memory_format);
8505}
8506
8507// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
8508inline const at::Tensor & resize_as_sparse_(const at::Tensor & self, const at::Tensor & the_template) {
8509 return at::_ops::resize_as_sparse_::call(self, the_template);
8510}
8511
8512// aten::zero_(Tensor(a!) self) -> Tensor(a!)
8513inline at::Tensor & zero_(at::Tensor & self) {
8514 return at::_ops::zero_::call(self);
8515}
8516
8517// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8518inline at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
8519 return at::_ops::sub_out::call(self, other, alpha, out);
8520}
8521// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8522inline at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
8523 return at::_ops::sub_out::call(self, other, alpha, out);
8524}
8525
8526// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
8527inline at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
8528 return at::_ops::sub_Tensor::call(self, other, alpha);
8529}
8530
8531// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
8532inline at::Tensor sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
8533 return at::_ops::sub_Scalar::call(self, other, alpha);
8534}
8535
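// A usage sketch (names illustrative): the `alpha` scalar scales the second
// operand before subtraction, i.e. self - alpha * other.
//
//   at::Tensor a = at::rand({3});
//   at::Tensor b = at::rand({3});
//   at::Tensor d = at::sub(a, b, /*alpha=*/2);  // a - 2 * b
//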
8536// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8537inline at::Tensor & subtract_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
8538 return at::_ops::subtract_out::call(self, other, alpha, out);
8539}
8540// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8541inline at::Tensor & subtract_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
8542 return at::_ops::subtract_out::call(self, other, alpha, out);
8543}
8544
8545// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
8546inline at::Tensor subtract(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
8547 return at::_ops::subtract_Tensor::call(self, other, alpha);
8548}
8549
8550// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
8551inline at::Tensor subtract(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
8552 return at::_ops::subtract_Scalar::call(self, other, alpha);
8553}
8554
8555// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
8556inline at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
8557 return at::_ops::rsub_Tensor::call(self, other, alpha);
8558}
8559
8560// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
8561inline at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values) {
8562 return at::_ops::heaviside_out::call(self, values, out);
8563}
8564// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
8565inline at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
8566 return at::_ops::heaviside_out::call(self, values, out);
8567}
8568
8569// aten::heaviside(Tensor self, Tensor values) -> Tensor
8570inline at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values) {
8571 return at::_ops::heaviside::call(self, values);
8572}
8573
8574// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
8575inline at::Tensor rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
8576 return at::_ops::rsub_Scalar::call(self, other, alpha);
8577}
8578
8579// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
8580inline at::Tensor _sparse_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
8581 return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
8582}
8583
8584// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8585inline at::Tensor & sparse_sampled_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
8586 return at::_ops::sparse_sampled_addmm_out::call(self, mat1, mat2, beta, alpha, out);
8587}
8588// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8589inline at::Tensor & sparse_sampled_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
8590 return at::_ops::sparse_sampled_addmm_out::call(self, mat1, mat2, beta, alpha, out);
8591}
8592
8593// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
8594inline at::Tensor sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
8595 return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha);
8596}
8597
8598// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)
8599inline ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
8600 return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
8601}
8602
8603// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor)
8604inline ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
8605 return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
8606}
8607
8608// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8609inline at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
8610 return at::_ops::addmm_out::call(self, mat1, mat2, beta, alpha, out);
8611}
8612// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
8613inline at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
8614 return at::_ops::addmm_out::call(self, mat1, mat2, beta, alpha, out);
8615}
8616
8617// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
8618inline at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
8619 return at::_ops::addmm::call(self, mat1, mat2, beta, alpha);
8620}
8621
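// A usage sketch (shapes illustrative): addmm computes
// beta * self + alpha * (mat1 @ mat2), with `self` broadcast to the result.
//
//   at::Tensor bias = at::rand({3});
//   at::Tensor x = at::rand({2, 4});
//   at::Tensor w = at::rand({4, 3});
//   at::Tensor y = at::addmm(bias, x, w);  // shape {2, 3}
//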
8622// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
8623inline at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) {
8624 return at::_ops::_addmm_activation_out::call(self, mat1, mat2, beta, alpha, use_gelu, out);
8625}
8626// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
8627inline at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
8628 return at::_ops::_addmm_activation_out::call(self, mat1, mat2, beta, alpha, use_gelu, out);
8629}
8630
8631// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
8632inline at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) {
8633 return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
8634}
8635
8636// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8637inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
8638 return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8639}
8640// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8641inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8642 return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
8643}
8644
8645// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8646inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
8647 return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8648}
8649// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8650inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8651 return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
8652}
8653
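// A usage sketch building a hypothetical 2x2 CSR matrix [[1, 0], [0, 2]]:
// crow/col are integer index tensors, and the size overload requires an
// explicit TensorOptions argument.
//
//   at::Tensor crow = at::tensor({0, 1, 2});
//   at::Tensor col  = at::tensor({0, 1});
//   at::Tensor vals = at::tensor({1.0, 2.0});
//   at::Tensor csr  = at::sparse_csr_tensor(crow, col, vals, {2, 2},
//       at::TensorOptions().dtype(at::kDouble));
//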
8654// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8655inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
8656 return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8657}
8658// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8659inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8660 return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
8661}
8662
8663// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8664inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
8665 return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8666}
8667// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8668inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8669 return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
8670}
8671
8672// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8673inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
8674 return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8675}
8676// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8677inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8678 return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
8679}
8680
8681// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8682inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
8683 return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8684}
8685// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8686inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8687 return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
8688}
8689
8690// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8691inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
8692 return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8693}
8694// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8695inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8696 return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
8697}
8698
8699// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8700inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
8701 return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8702}
8703// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8704inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8705 return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
8706}
8707
8708// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8709inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
8710 return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8711}
8712// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8713inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8714 return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
8715}
8716
8717// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8718inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
8719 return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8720}
8721// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8722inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8723 return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
8724}
8725
8726// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8727inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8728 return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8729}
8730// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8731inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8732 return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
8733}
8734
8735// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8736inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8737 return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8738}
8739// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8740inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8741 return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
8742}
8743
8744// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8745inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8746 return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8747}
8748// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8749inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8750 return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
8751}
8752
8753// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8754inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8755 return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8756}
8757// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8758inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8759 return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
8760}
8761
8762// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8763inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8764 return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8765}
8766// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8767inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8768 return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
8769}
8770
8771// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8772inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) {
8773 return at::_ops::sparse_coo_tensor_size::call(size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8774}
8775// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8776inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8777 return at::_ops::sparse_coo_tensor_size::call(size, dtype, layout, device, pin_memory);
8778}
8779
8780// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8781inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options={}) {
8782 return at::_ops::sparse_coo_tensor_indices::call(indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8783}
8784// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8785inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8786 return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory);
8787}
8788
8789// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8790inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8791 return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8792}
8793// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8794inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8795 return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory);
8796}
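// Illustrative usage (editor's sketch, not part of the generated header):
// `indices` is a 2 x nnz Long tensor (one row per sparse dimension) and
// `values` holds the nnz entries; the contents below are assumptions.
//
//   at::Tensor indices = at::tensor({0, 1, 2, 0, 1, 2}, at::kLong).reshape({2, 3});
//   at::Tensor values  = at::tensor({10.0f, 20.0f, 30.0f});
//   at::Tensor coo = at::sparse_coo_tensor(indices, values, {3, 3});  // diagonal entries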
8797
8798// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8799inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8800 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8801}
8802namespace symint {
8803 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8804 at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
8805 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8806 }
8807}
8808
8809// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8810inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8811 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
8812}
8813namespace symint {
8814 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8815 at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8816 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
8817 }
8818}
8819
8820// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8821inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) {
8822 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8823}
8824namespace symint {
8825 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8826 at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}) {
8827 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8828 }
8829}
8830
8831// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
8832inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8833 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory);
8834}
8835namespace symint {
8836 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8837 at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8838 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory);
8839 }
8840}
8841
8842// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()
8843inline void _validate_sparse_coo_tensor_args(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
8844 return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size);
8845}
8846
8847// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()
8848inline void _validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
8849 return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout);
8850}
8851
8852// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
8853inline void _validate_sparse_csr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
8854 return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, size);
8855}
8856
8857// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
8858inline void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
8859 return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
8860}
8861
8862// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
8863inline void _validate_sparse_bsr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
8864 return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size);
8865}
8866
8867// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
8868inline void _validate_sparse_bsc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
8869 return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
8870}
8871
8872// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8873inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
8874 return at::_ops::_sparse_coo_tensor_with_dims::call(sparse_dim, dense_dim, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8875}
8876// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8877inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8878 return at::_ops::_sparse_coo_tensor_with_dims::call(sparse_dim, dense_dim, size, dtype, layout, device, pin_memory);
8879}
8880
8881// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8882inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
8883 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8884}
8885namespace symint {
8886 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8887 at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
8888 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8889 }
8890}
8891
8892// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8893inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8894 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory);
8895}
8896namespace symint {
8897 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
8898 at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8899 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, dtype, layout, device, pin_memory);
8900 }
8901}
8902
8903// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8904inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
8905 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8906}
8907namespace symint {
8908 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8909 at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) {
8910 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
8911 }
8912}
8913
8914// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
8915inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8916 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
8917}
8918namespace symint {
8919 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
8920 at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
8921 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
8922 }
8923}
8924
8925// aten::_to_cpu(Tensor[] tensors) -> Tensor[]
8926inline ::std::vector<at::Tensor> _to_cpu(at::TensorList tensors) {
8927 return at::_ops::_to_cpu::call(tensors);
8928}
8929
8930// aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor
8931inline at::Tensor to_dense_backward(const at::Tensor & grad, const at::Tensor & input) {
8932 return at::_ops::to_dense_backward::call(grad, input);
8933}
8934
8935// aten::_coalesce(Tensor self) -> Tensor
8936inline at::Tensor _coalesce(const at::Tensor & self) {
8937 return at::_ops::_coalesce::call(self);
8938}
8939
8940// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
8941inline at::Tensor & hspmm_out(at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) {
8942 return at::_ops::hspmm_out::call(mat1, mat2, out);
8943}
8944// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
8945inline at::Tensor & hspmm_outf(const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
8946 return at::_ops::hspmm_out::call(mat1, mat2, out);
8947}
8948
8949// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor
8950inline at::Tensor hspmm(const at::Tensor & mat1, const at::Tensor & mat2) {
8951 return at::_ops::hspmm::call(mat1, mat2);
8952}
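// A note on the generated out-variant naming, using hspmm as the example:
// `hspmm_out` takes the output tensor first, `hspmm_outf` takes it last,
// and both forward to the same `hspmm.out` op. Sketch with assumed names
// (`mat1` sparse COO, `mat2` dense, `out` preallocated):
//
//   at::hspmm_out(out, mat1, mat2);   // out-parameter first
//   at::hspmm_outf(mat1, mat2, out);  // same op, out-parameter last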
8953
8954// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
8955inline at::Tensor & copy_sparse_to_sparse_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
8956 return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking);
8957}
8958
8959// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
8960inline ::std::vector<at::Tensor> unbind(const at::Tensor & self, int64_t dim=0) {
8961 return at::_ops::unbind_int::call(self, dim);
8962}
8963
8964// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
8965inline ::std::vector<at::Tensor> unbind(const at::Tensor & self, at::Dimname dim) {
8966 return at::_ops::unbind_Dimname::call(self, dim);
8967}
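// Illustrative usage (editor's sketch, not part of the generated header):
//
//   at::Tensor t = at::arange(6).reshape({2, 3});
//   std::vector<at::Tensor> rows = at::unbind(t, /*dim=*/0);  // 2 views of shape {3}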
8968
8969// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor
8970inline at::Tensor mkldnn_reorder_conv2d_weight(const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) {
8971 return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size);
8972}
8973
8974// aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor
8975inline at::Tensor mkldnn_reorder_conv3d_weight(const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) {
8976 return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups);
8977}
8978
8979// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
8980inline at::Tensor to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input) {
8981 return at::_ops::to_mkldnn_backward::call(grad, input);
8982}
8983
8984// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor
8985inline at::Tensor quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
8986 return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range);
8987}
8988
8989// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
8990inline at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
8991 return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype);
8992}
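// Illustrative usage (editor's sketch, not part of the generated header):
// a round trip through per-tensor affine quantization; the scale and
// zero_point values are assumptions.
//
//   at::Tensor x = at::rand({4});
//   at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1,
//                                          /*zero_point=*/0, at::kQUInt8);
//   at::Tensor y = at::dequantize(q);  // approximates x up to the 0.1 step size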
8993
8994// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
8995inline at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
8996 return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype);
8997}
8998
8999// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
9000inline ::std::vector<at::Tensor> quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
9001 return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype);
9002}
9003
9004// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
9005inline at::Tensor quantize_per_channel(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
9006 return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype);
9007}
9008
9009// aten::dequantize.self(Tensor self) -> Tensor
9010inline at::Tensor dequantize(const at::Tensor & self) {
9011 return at::_ops::dequantize_self::call(self);
9012}
9013
9014// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
9015inline ::std::vector<at::Tensor> dequantize(at::TensorList tensors) {
9016 return at::_ops::dequantize_tensors::call(tensors);
9017}
9018
9019// aten::q_scale(Tensor self) -> float
9020inline double q_scale(const at::Tensor & self) {
9021 return at::_ops::q_scale::call(self);
9022}
9023
9024// aten::q_zero_point(Tensor self) -> int
9025inline int64_t q_zero_point(const at::Tensor & self) {
9026 return at::_ops::q_zero_point::call(self);
9027}
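// Illustrative usage (editor's sketch): reading the quantizer parameters
// back from a per-tensor quantized tensor `q` (assumed to come from a
// quantize_per_tensor call like the sketch above):
//
//   double s  = at::q_scale(q);       // e.g. 0.1
//   int64_t z = at::q_zero_point(q);  // e.g. 0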
9028
9029// aten::q_per_channel_scales(Tensor self) -> Tensor
9030inline at::Tensor q_per_channel_scales(const at::Tensor & self) {
9031 return at::_ops::q_per_channel_scales::call(self);
9032}
9033
9034// aten::q_per_channel_zero_points(Tensor self) -> Tensor
9035inline at::Tensor q_per_channel_zero_points(const at::Tensor & self) {
9036 return at::_ops::q_per_channel_zero_points::call(self);
9037}
9038
9039// aten::q_per_channel_axis(Tensor self) -> int
9040inline int64_t q_per_channel_axis(const at::Tensor & self) {
9041 return at::_ops::q_per_channel_axis::call(self);
9042}
9043
9044// aten::int_repr(Tensor self) -> Tensor
9045inline at::Tensor int_repr(const at::Tensor & self) {
9046 return at::_ops::int_repr::call(self);
9047}
9048
9049// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
9050inline at::Tensor _make_per_tensor_quantized_tensor(const at::Tensor & self, double scale, int64_t zero_point) {
9051 return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point);
9052}
9053
9054// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
9055inline at::Tensor _make_per_channel_quantized_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
9056 return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis);
9057}
9058
9059// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
9060inline at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
9061 return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max);
9062}
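// Illustrative usage (editor's sketch, not part of the generated header):
// fake-quantization rounds to the (scale, zero_point) grid, clamps to
// [quant_min, quant_max], and maps back to float, so the result stays a
// regular float tensor. The parameter values below are assumptions.
//
//   at::Tensor x  = at::randn({4});
//   at::Tensor fq = at::fake_quantize_per_tensor_affine(x, /*scale=*/0.1,
//                                                       /*zero_point=*/0,
//                                                       /*quant_min=*/0,
//                                                       /*quant_max=*/255);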
9063
9064// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
9065inline at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
9066 return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max);
9067}
9068
9069// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
9070inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
9071 return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
9072}
9073
9074// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
9075inline ::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
9076 return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
9077}
9078
9079// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
9080inline at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
9081 return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask);
9082}
9083
9084// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
9085inline at::Tensor _fake_quantize_learnable_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
9086 return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor);
9087}
9088
9089// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
9090inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
9091 return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
9092}
9093
9094// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor
9095inline at::Tensor fake_quantize_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
9096 return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max);
9097}
9098
9099// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
9100inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
9101 return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
9102}
9103
9104// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
9105inline at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask) {
9106 return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask);
9107}
9108
9109// aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor
9110inline at::Tensor _fake_quantize_learnable_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
9111 return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
9112}
9113
9114// aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
9115inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
9116 return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
9117}
9118
9119// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
9120inline at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) {
9121 return at::_ops::fused_moving_avg_obs_fake_quant::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
9122}
9123
9124// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
9125inline ::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) {
9126 return at::_ops::_fused_moving_avg_obs_fq_helper::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
9127}
9128
9129// aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
9130inline ::std::tuple<double,int64_t> _choose_qparams_per_tensor(const at::Tensor & self, bool reduce_range=false) {
9131 return at::_ops::_choose_qparams_per_tensor::call(self, reduce_range);
9132}
9133
9134// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor
9135inline at::Tensor _saturate_weight_to_fp16(const at::Tensor & weight) {
9136 return at::_ops::_saturate_weight_to_fp16::call(weight);
9137}
9138
9139// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
9140inline ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
9141 return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width);
9142}
9143
9144// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
9145inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
9146 return at::_ops::_to_copy::call(self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
9147}
9148// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
9149inline at::Tensor _to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
9150 return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
9151}
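// Illustrative usage (editor's sketch): _to_copy is the functional
// primitive behind Tensor::to; a dtype-converting copy via the
// TensorOptions overload:
//
//   at::Tensor x = at::rand({2, 2});
//   at::Tensor d = at::_to_copy(x, at::TensorOptions().dtype(at::kDouble));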
9152
9153// aten::meshgrid(Tensor[] tensors) -> Tensor[]
9154inline ::std::vector<at::Tensor> meshgrid(at::TensorList tensors) {
9155 return at::_ops::meshgrid::call(tensors);
9156}
9157
9158// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
9159inline ::std::vector<at::Tensor> meshgrid(at::TensorList tensors, c10::string_view indexing) {
9160 return at::_ops::meshgrid_indexing::call(tensors, indexing);
9161}
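// Illustrative usage (editor's sketch, not part of the generated header):
//
//   at::Tensor a = at::arange(3);
//   at::Tensor b = at::arange(2);
//   auto grids = at::meshgrid({a, b}, /*indexing=*/"ij");  // two {3, 2} tensors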
9162
9163// aten::cartesian_prod(Tensor[] tensors) -> Tensor
9164inline at::Tensor cartesian_prod(at::TensorList tensors) {
9165 return at::_ops::cartesian_prod::call(tensors);
9166}
9167
9168// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
9169inline at::Tensor combinations(const at::Tensor & self, int64_t r=2, bool with_replacement=false) {
9170 return at::_ops::combinations::call(self, r, with_replacement);
9171}
9172
9173// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
9174inline at::ScalarType result_type(const at::Tensor & tensor, const at::Tensor & other) {
9175 return at::_ops::result_type_Tensor::call(tensor, other);
9176}
9177
9178// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
9179inline at::ScalarType result_type(const at::Tensor & tensor, const at::Scalar & other) {
9180 return at::_ops::result_type_Scalar::call(tensor, other);
9181}
9182
9183// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
9184inline at::ScalarType result_type(const at::Scalar & scalar, const at::Tensor & tensor) {
9185 return at::_ops::result_type_Scalar_Tensor::call(scalar, tensor);
9186}
9187
9188// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
9189inline at::ScalarType result_type(const at::Scalar & scalar1, const at::Scalar & scalar2) {
9190 return at::_ops::result_type_Scalar_Scalar::call(scalar1, scalar2);
9191}
9192
9193// aten::can_cast(ScalarType from, ScalarType to) -> bool
9194inline bool can_cast(at::ScalarType from, at::ScalarType to) {
9195 return at::_ops::can_cast::call(from, to);
9196}
9197
9198// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
9199inline at::ScalarType promote_types(at::ScalarType type1, at::ScalarType type2) {
9200 return at::_ops::promote_types::call(type1, type2);
9201}
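// Illustrative usage (editor's sketch, not part of the generated header):
//
//   at::ScalarType pt = at::promote_types(at::kInt, at::kFloat);        // at::kFloat
//   bool ok = at::can_cast(at::kFloat, at::kInt);                       // false: lossy
//   at::ScalarType rt = at::result_type(at::ones({1}, at::kInt), 2.5);  // at::kFloat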
9202
9203// aten::_local_scalar_dense(Tensor self) -> Scalar
9204inline at::Scalar _local_scalar_dense(const at::Tensor & self) {
9205 return at::_ops::_local_scalar_dense::call(self);
9206}
9207
9208// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)
9209inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
9210 return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
9211}
9212
9213// aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])
9214inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
9215 return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
9216}
9217
9218// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
9219inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) {
9220 return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
9221}
9222
9223// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
9224inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
9225 return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
9226}
9227
9228// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
9229inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
9230 return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
9231}
9232
9233// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
9234inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
9235 return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
9236}
9237
9238// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)
9239inline ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) {
9240 return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
9241}
9242
9243// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
9244inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
9245 return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
9246}
9247
9248// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
9249inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
9250 return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
9251}
9252
9253// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
9254inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
9255 return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
9256}
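// Illustrative usage (editor's sketch, not part of the generated header):
// a single-layer, unidirectional LSTM. For that configuration `hx` is
// {h0, c0} and `params` is {w_ih, w_hh, b_ih, b_hh} with the usual
// {4*H, I} / {4*H, H} gate layout; all names and sizes are assumptions.
//
//   int64_t T = 5, B = 2, I = 4, H = 3;
//   at::Tensor input = at::randn({T, B, I});
//   at::Tensor h0 = at::zeros({1, B, H}), c0 = at::zeros({1, B, H});
//   at::Tensor w_ih = at::randn({4 * H, I}), w_hh = at::randn({4 * H, H});
//   at::Tensor b_ih = at::zeros({4 * H}), b_hh = at::zeros({4 * H});
//   auto [output, h_n, c_n] = at::lstm(input, {h0, c0}, {w_ih, w_hh, b_ih, b_hh},
//                                      /*has_biases=*/true, /*num_layers=*/1,
//                                      /*dropout=*/0.0, /*train=*/false,
//                                      /*bidirectional=*/false, /*batch_first=*/false);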
9257
9258// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
9259inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
9260 return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
9261}
9262
9263// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
9264inline ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
9265 return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
9266}
9267
9268// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
9269inline ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
9270 return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
9271}
9272
9273// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
9274inline ::std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
9275 return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
9276}
9277
9278// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
9279inline ::std::tuple<at::Tensor,at::Tensor> rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
9280 return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
9281}
9282
9283// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
9284inline ::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
9285 return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
9286}
9287
9288// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
9289inline ::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
9290 return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
9291}
9292
9293// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
9294inline ::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) {
9295 return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
9296}
9297
9298// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
9299inline at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) {
9300 return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
9301}
9302
9303// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
9304inline at::Tensor rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) {
9305 return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
9306}
9307
9308// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
9309inline at::Tensor rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) {
9310 return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
9311}
9312
9313// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
9314inline ::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
9315 return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
9316}
9317
9318// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
9319inline at::Tensor quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
9320 return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
9321}
9322
9323// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
9324inline at::Tensor quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
9325 return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
9326}
9327
9328// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
9329inline at::Tensor quantized_rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
9330 return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
9331}
9332
9333// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
9334inline ::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
9335 return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first);
9336}
9337
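// Usage sketch (editorial illustration, not generated code): packing a padded
// [T, B, *] batch whose per-sequence lengths are already sorted in decreasing
// order, as _pack_padded_sequence expects; lengths must be an int64 CPU tensor.
//
//   at::Tensor input = at::randn({4, 2, 8});                  // T=4, B=2
//   at::Tensor lengths = at::tensor(c10::IntArrayRef{4, 2});
//   auto [data, batch_sizes] =
//       at::_pack_padded_sequence(input, lengths, /*batch_first=*/false);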
9338// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
9339inline at::Tensor _pack_padded_sequence_backward(const at::Tensor & grad, at::IntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
9340 return at::_ops::_pack_padded_sequence_backward::call(grad, c10::fromIntArrayRefSlow(input_size), batch_sizes, batch_first);
9341}
9342namespace symint {
9343 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
9344 at::Tensor _pack_padded_sequence_backward(const at::Tensor & grad, at::IntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
9345 return at::_ops::_pack_padded_sequence_backward::call(grad, c10::fromIntArrayRefSlow(input_size), batch_sizes, batch_first);
9346 }
9347}
9348
9349// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
9350inline at::Tensor _pack_padded_sequence_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
9351 return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
9352}
9353namespace symint {
9354 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
9355 at::Tensor _pack_padded_sequence_backward(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
9356 return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
9357 }
9358}
9359
9360// aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
9361inline ::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
9362 return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length);
9363}
9364
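// Usage sketch (editorial illustration, continuing the packing example above):
// unpacking restores the padded layout; total_length must be at least the
// longest sequence length.
//
//   auto [padded, out_lengths] = at::_pad_packed_sequence(
//       data, batch_sizes, /*batch_first=*/false,
//       /*padding_value=*/0.0, /*total_length=*/4);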
9365namespace symint {
9366 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
9367 at::Tensor & set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
9368 return at::_ops::set__source_Storage_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
9369 }
9370}
9371
9372namespace symint {
9373 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
9374 at::Tensor & set_(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
9375 return at::_ops::set__source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
9376 }
9377}
9378
9379namespace symint {
9380 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
9381 at::Tensor & set_(at::Tensor & self, const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
9382 return at::_ops::set__source_Tensor_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
9383 }
9384}
9385
9386namespace symint {
9387 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
9388 at::Tensor & set_(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
9389 return at::_ops::set__source_Tensor_storage_offset::call(self, source, storage_offset, size, stride);
9390 }
9391}
9392
9393// aten::lift(Tensor self) -> Tensor
9394inline at::Tensor lift(const at::Tensor & self) {
9395 return at::_ops::lift::call(self);
9396}
9397
9398// aten::lift_fresh(Tensor(a) self) -> Tensor(a)
9399inline at::Tensor lift_fresh(const at::Tensor & self) {
9400 return at::_ops::lift_fresh::call(self);
9401}
9402
9403// aten::lift_fresh_copy(Tensor self) -> Tensor
9404inline at::Tensor lift_fresh_copy(const at::Tensor & self) {
9405 return at::_ops::lift_fresh_copy::call(self);
9406}
9407
9408// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
9409inline at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
9410 return at::_ops::masked_fill_Scalar::call(self, mask, value);
9411}
9412
9413// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
9414inline at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
9415 return at::_ops::masked_fill_Tensor::call(self, mask, value);
9416}
9417
9418// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
9419inline at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
9420 return at::_ops::masked_scatter::call(self, mask, source);
9421}
9422
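// Usage sketch (editorial illustration): masked_fill replaces the elements of
// self selected by a boolean mask of the same (broadcastable) shape.
//
//   at::Tensor x = at::randn({2, 3});
//   at::Tensor y = at::masked_fill(x, x.lt(0.0), 0.0); // clamp negatives to 0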
9423// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor
9424inline at::Tensor _masked_softmax(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt) {
9425 return at::_ops::_masked_softmax::call(self, mask, dim, mask_type);
9426}
9427
9428// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor
9429inline at::Tensor _masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt) {
9430 return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
9431}
9432
9433namespace symint {
9434 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
9435 at::Tensor view(const at::Tensor & self, at::IntArrayRef size) {
9436 return at::_ops::view::call(self, c10::fromIntArrayRefSlow(size));
9437 }
9438}
9439
9440namespace symint {
9441 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
9442 at::Tensor view(const at::Tensor & self, c10::SymIntArrayRef size) {
9443 return at::_ops::view::call(self, size);
9444 }
9445}
9446
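// Usage sketch (editorial illustration): the at::symint wrappers select the
// plain-integer or SymInt overload through the explicit template argument.
//
//   at::Tensor x = at::arange(6);
//   at::Tensor y = at::symint::view<int64_t>(x, {2, 3});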
9447// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
9448inline at::Tensor put(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) {
9449 return at::_ops::put::call(self, index, source, accumulate);
9450}
9451
9452// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
9453inline at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) {
9454 return at::_ops::index_add_out::call(self, dim, index, source, alpha, out);
9455}
9456// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
9457inline at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
9458 return at::_ops::index_add_out::call(self, dim, index, source, alpha, out);
9459}
9460
9461// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
9462inline at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) {
9463 return at::_ops::index_add::call(self, dim, index, source, alpha);
9464}
9465
9466// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
9467inline at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) {
9468 return at::_ops::index_add_dimname::call(self, dim, index, source, alpha);
9469}
9470
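// Usage sketch (editorial illustration): index_add accumulates slices of
// source into self at the positions given by index along dim.
//
//   at::Tensor self = at::zeros({5, 3});
//   at::Tensor index = at::tensor(c10::IntArrayRef{0, 4, 2});
//   at::Tensor source = at::ones({3, 3});
//   at::Tensor out = at::index_add(self, 0, index, source); // rows 0, 4, 2 get +1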
9471// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
9472inline at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) {
9473 return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out);
9474}
9475// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
9476inline at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {
9477 return at::_ops::index_reduce_out::call(self, dim, index, source, reduce, include_self, out);
9478}
9479
9480// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
9481inline at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) {
9482 return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
9483}
9484
9485// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
9486inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
9487 return at::_ops::index_fill_int_Scalar::call(self, dim, index, value);
9488}
9489
9490// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
9491inline at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
9492 return at::_ops::index_fill_int_Tensor::call(self, dim, index, value);
9493}
9494
9495// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
9496inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
9497 return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value);
9498}
9499
9500// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
9501inline at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
9502 return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value);
9503}
9504
9505// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
9506inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
9507 return at::_ops::scatter_src::call(self, dim, index, src);
9508}
9509
9510// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
9511inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
9512 return at::_ops::scatter_src_out::call(self, dim, index, src, out);
9513}
9514// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
9515inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
9516 return at::_ops::scatter_src_out::call(self, dim, index, src, out);
9517}
9518
9519// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
9520inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
9521 return at::_ops::scatter_value::call(self, dim, index, value);
9522}
9523
9524// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
9525inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
9526 return at::_ops::scatter_value_out::call(self, dim, index, value, out);
9527}
9528// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
9529inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
9530 return at::_ops::scatter_value_out::call(self, dim, index, value, out);
9531}
9532
9533// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
9534inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
9535 return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
9536}
9537
9538// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
9539inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
9540 return at::_ops::scatter_reduce_out::call(self, dim, index, src, reduce, out);
9541}
9542// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
9543inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
9544 return at::_ops::scatter_reduce_out::call(self, dim, index, src, reduce, out);
9545}
9546
9547// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
9548inline at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
9549 return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
9550}
9551
9552// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
9553inline at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
9554 return at::_ops::scatter_value_reduce_out::call(self, dim, index, value, reduce, out);
9555}
9556// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
9557inline at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
9558 return at::_ops::scatter_value_reduce_out::call(self, dim, index, value, reduce, out);
9559}
9560
9561// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
9562inline at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
9563 return at::_ops::scatter_dimname_src::call(self, dim, index, src);
9564}
9565
9566// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
9567inline at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
9568 return at::_ops::scatter_dimname_value::call(self, dim, index, value);
9569}
9570
9571// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
9572inline at::Tensor scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
9573 return at::_ops::scatter_add::call(self, dim, index, src);
9574}
9575
9576// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
9577inline at::Tensor & scatter_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
9578 return at::_ops::scatter_add_out::call(self, dim, index, src, out);
9579}
9580// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
9581inline at::Tensor & scatter_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
9582 return at::_ops::scatter_add_out::call(self, dim, index, src, out);
9583}
9584
9585// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
9586inline at::Tensor scatter_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
9587 return at::_ops::scatter_add_dimname::call(self, dim, index, src);
9588}
9589
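// Usage sketch (editorial illustration): scatter_add sums src values into the
// result at positions given by index, so out[index[i]] += src[i] along dim 0.
//
//   at::Tensor self = at::zeros({3});
//   at::Tensor index = at::tensor(c10::IntArrayRef{0, 1, 0});
//   at::Tensor src = at::arange(1., 4.);                   // {1, 2, 3}
//   at::Tensor out = at::scatter_add(self, 0, index, src); // {4, 2, 0}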
9590// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
9591inline at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) {
9592 return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self);
9593}
9594
9595// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
9596inline at::Tensor & scatter_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) {
9597 return at::_ops::scatter_reduce_two_out::call(self, dim, index, src, reduce, include_self, out);
9598}
9599// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)
9600inline at::Tensor & scatter_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
9601 return at::_ops::scatter_reduce_two_out::call(self, dim, index, src, reduce, include_self, out);
9602}
9603
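// Usage sketch (editorial illustration, reusing the tensors above):
// scatter_reduce generalizes scatter_add to "sum", "prod", "mean", "amax" and
// "amin"; with include_self=false the destination values are excluded from
// the reduction.
//
//   at::Tensor mx = at::scatter_reduce(self, 0, index, src, "amax",
//                                      /*include_self=*/false);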
9604// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9605inline at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
9606 return at::_ops::bitwise_and_Tensor_out::call(self, other, out);
9607}
9608// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9609inline at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9610 return at::_ops::bitwise_and_Tensor_out::call(self, other, out);
9611}
9612
9613// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9614inline at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9615 return at::_ops::bitwise_and_Scalar_out::call(self, other, out);
9616}
9617// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9618inline at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9619 return at::_ops::bitwise_and_Scalar_out::call(self, other, out);
9620}
9621
9622// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
9623inline at::Tensor bitwise_and(const at::Tensor & self, const at::Scalar & other) {
9624 return at::_ops::bitwise_and_Scalar::call(self, other);
9625}
9626
9627// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
9628inline at::Tensor bitwise_and(const at::Scalar & self, const at::Tensor & other) {
9629 return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
9630}
9631
9632// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
9633inline at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other) {
9634 return at::_ops::bitwise_and_Tensor::call(self, other);
9635}
9636
9637// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
9638inline at::Tensor __and__(const at::Tensor & self, const at::Scalar & other) {
9639 return at::_ops::__and___Scalar::call(self, other);
9640}
9641
9642// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
9643inline at::Tensor __and__(const at::Tensor & self, const at::Tensor & other) {
9644 return at::_ops::__and___Tensor::call(self, other);
9645}
9646
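// Usage sketch (editorial illustration): the bitwise family operates
// elementwise on integral (and bool) tensors; __and__ backs the & operator.
//
//   at::Tensor a = at::tensor(c10::IntArrayRef{12, 10}); // 0b1100, 0b1010
//   at::Tensor b = at::tensor(c10::IntArrayRef{10, 6});  // 0b1010, 0b0110
//   at::Tensor c = at::bitwise_and(a, b);                // {8, 2}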
9647// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9648inline at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
9649 return at::_ops::bitwise_or_Tensor_out::call(self, other, out);
9650}
9651// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9652inline at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9653 return at::_ops::bitwise_or_Tensor_out::call(self, other, out);
9654}
9655
9656// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9657inline at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9658 return at::_ops::bitwise_or_Scalar_out::call(self, other, out);
9659}
9660// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9661inline at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9662 return at::_ops::bitwise_or_Scalar_out::call(self, other, out);
9663}
9664
9665// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
9666inline at::Tensor bitwise_or(const at::Tensor & self, const at::Scalar & other) {
9667 return at::_ops::bitwise_or_Scalar::call(self, other);
9668}
9669
9670// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
9671inline at::Tensor bitwise_or(const at::Scalar & self, const at::Tensor & other) {
9672 return at::_ops::bitwise_or_Scalar_Tensor::call(self, other);
9673}
9674
9675// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
9676inline at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other) {
9677 return at::_ops::bitwise_or_Tensor::call(self, other);
9678}
9679
9680// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
9681inline at::Tensor __or__(const at::Tensor & self, const at::Scalar & other) {
9682 return at::_ops::__or___Scalar::call(self, other);
9683}
9684
9685// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
9686inline at::Tensor __or__(const at::Tensor & self, const at::Tensor & other) {
9687 return at::_ops::__or___Tensor::call(self, other);
9688}
9689
9690// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9691inline at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
9692 return at::_ops::bitwise_xor_Tensor_out::call(self, other, out);
9693}
9694// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9695inline at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9696 return at::_ops::bitwise_xor_Tensor_out::call(self, other, out);
9697}
9698
9699// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9700inline at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9701 return at::_ops::bitwise_xor_Scalar_out::call(self, other, out);
9702}
9703// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9704inline at::Tensor & bitwise_xor_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9705 return at::_ops::bitwise_xor_Scalar_out::call(self, other, out);
9706}
9707
9708// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
9709inline at::Tensor bitwise_xor(const at::Tensor & self, const at::Scalar & other) {
9710 return at::_ops::bitwise_xor_Scalar::call(self, other);
9711}
9712
9713// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
9714inline at::Tensor bitwise_xor(const at::Scalar & self, const at::Tensor & other) {
9715 return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
9716}
9717
9718// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
9719inline at::Tensor bitwise_xor(const at::Tensor & self, const at::Tensor & other) {
9720 return at::_ops::bitwise_xor_Tensor::call(self, other);
9721}
9722
9723// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
9724inline at::Tensor __xor__(const at::Tensor & self, const at::Scalar & other) {
9725 return at::_ops::__xor___Scalar::call(self, other);
9726}
9727
9728// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
9729inline at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other) {
9730 return at::_ops::__xor___Tensor::call(self, other);
9731}
9732
9733// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
9734inline at::Tensor __lshift__(const at::Tensor & self, const at::Scalar & other) {
9735 return at::_ops::__lshift___Scalar::call(self, other);
9736}
9737
9738// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
9739inline at::Tensor __lshift__(const at::Tensor & self, const at::Tensor & other) {
9740 return at::_ops::__lshift___Tensor::call(self, other);
9741}
9742
9743// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
9744inline at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Tensor & other) {
9745 return at::_ops::bitwise_left_shift_Tensor::call(self, other);
9746}
9747
9748// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9749inline at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
9750 return at::_ops::bitwise_left_shift_Tensor_out::call(self, other, out);
9751}
9752// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9753inline at::Tensor & bitwise_left_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9754 return at::_ops::bitwise_left_shift_Tensor_out::call(self, other, out);
9755}
9756
9757// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
9758inline at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Scalar & other) {
9759 return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other);
9760}
9761
9762// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9763inline at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9764 return at::_ops::bitwise_left_shift_Tensor_Scalar_out::call(self, other, out);
9765}
9766// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9767inline at::Tensor & bitwise_left_shift_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9768 return at::_ops::bitwise_left_shift_Tensor_Scalar_out::call(self, other, out);
9769}
9770
9771// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
9772inline at::Tensor bitwise_left_shift(const at::Scalar & self, const at::Tensor & other) {
9773 return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other);
9774}
9775
9776// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
9777inline at::Tensor __rshift__(const at::Tensor & self, const at::Scalar & other) {
9778 return at::_ops::__rshift___Scalar::call(self, other);
9779}
9780
9781// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
9782inline at::Tensor __rshift__(const at::Tensor & self, const at::Tensor & other) {
9783 return at::_ops::__rshift___Tensor::call(self, other);
9784}
9785
9786// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
9787inline at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Tensor & other) {
9788 return at::_ops::bitwise_right_shift_Tensor::call(self, other);
9789}
9790
9791// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9792inline at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
9793 return at::_ops::bitwise_right_shift_Tensor_out::call(self, other, out);
9794}
9795// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9796inline at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9797 return at::_ops::bitwise_right_shift_Tensor_out::call(self, other, out);
9798}
9799
9800// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
9801inline at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Scalar & other) {
9802 return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other);
9803}
9804
9805// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9806inline at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9807 return at::_ops::bitwise_right_shift_Tensor_Scalar_out::call(self, other, out);
9808}
9809// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9810inline at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9811 return at::_ops::bitwise_right_shift_Tensor_Scalar_out::call(self, other, out);
9812}
9813
9814// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
9815inline at::Tensor bitwise_right_shift(const at::Scalar & self, const at::Tensor & other) {
9816 return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other);
9817}
9818
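// Usage sketch (editorial illustration): the shift operators accept a tensor
// or scalar amount; __lshift__/__rshift__ back the << and >> operators.
//
//   at::Tensor v = at::tensor(c10::IntArrayRef{1, 2, 4});
//   at::Tensor l = at::bitwise_left_shift(v, 2);   // {4, 8, 16}
//   at::Tensor r = at::bitwise_right_shift(v, 1);  // {0, 1, 2}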
9819// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
9820inline at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
9821 return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out);
9822}
9823// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
9824inline at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
9825 return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out);
9826}
9827
9828// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
9829inline at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
9830 return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
9831}
9832
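// Usage sketch (editorial illustration): addbmm reduces a batch of matrix
// products into a single matrix,
// out = beta * self + alpha * sum_i batch1[i] @ batch2[i].
//
//   at::Tensor self = at::zeros({3, 5});
//   at::Tensor b1 = at::randn({10, 3, 4});
//   at::Tensor b2 = at::randn({10, 4, 5});
//   at::Tensor out = at::addbmm(self, b1, b2); // shape [3, 5]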
9833// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
9834inline at::Tensor & diag_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) {
9835 return at::_ops::diag_out::call(self, diagonal, out);
9836}
9837// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
9838inline at::Tensor & diag_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
9839 return at::_ops::diag_out::call(self, diagonal, out);
9840}
9841
9842// aten::diag(Tensor self, int diagonal=0) -> Tensor
9843inline at::Tensor diag(const at::Tensor & self, int64_t diagonal=0) {
9844 return at::_ops::diag::call(self, diagonal);
9845}
9846
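// Usage sketch (editorial illustration): diag is two-way, building a matrix
// from a 1-D input and extracting a diagonal from a 2-D input.
//
//   at::Tensor v = at::arange(1., 4.);  // {1, 2, 3}
//   at::Tensor m = at::diag(v);         // 3x3 with v on the main diagonal
//   at::Tensor d = at::diag(m);         // {1, 2, 3} again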
9847// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
9848inline at::Tensor & cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt) {
9849 return at::_ops::cross_out::call(self, other, dim, out);
9850}
9851// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
9852inline at::Tensor & cross_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) {
9853 return at::_ops::cross_out::call(self, other, dim, out);
9854}
9855
9856// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
9857inline at::Tensor cross(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt) {
9858 return at::_ops::cross::call(self, other, dim);
9859}
9860
9861// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
9862inline at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) {
9863 return at::_ops::triu_out::call(self, diagonal, out);
9864}
9865// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
9866inline at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
9867 return at::_ops::triu_out::call(self, diagonal, out);
9868}
9869
9870// aten::triu(Tensor self, int diagonal=0) -> Tensor
9871inline at::Tensor triu(const at::Tensor & self, int64_t diagonal=0) {
9872 return at::_ops::triu::call(self, diagonal);
9873}
9874
9875// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
9876inline at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) {
9877 return at::_ops::tril_out::call(self, diagonal, out);
9878}
9879// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
9880inline at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
9881 return at::_ops::tril_out::call(self, diagonal, out);
9882}
9883
9884// aten::tril(Tensor self, int diagonal=0) -> Tensor
9885inline at::Tensor tril(const at::Tensor & self, int64_t diagonal=0) {
9886 return at::_ops::tril::call(self, diagonal);
9887}
9888
9889// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
9890inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) {
9891 return at::_ops::tril_indices::call(row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
9892}
9893// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
9894inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
9895 return at::_ops::tril_indices::call(row, col, offset, dtype, layout, device, pin_memory);
9896}
9897
9898// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
9899inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) {
9900 return at::_ops::triu_indices::call(row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
9901}
9902// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
9903inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
9904 return at::_ops::triu_indices::call(row, col, offset, dtype, layout, device, pin_memory);
9905}
9906
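// Usage sketch (editorial illustration): tril/triu zero out one triangle,
// while tril_indices/triu_indices enumerate coordinates as a [2, N] int64
// tensor (row indices first, then column indices).
//
//   at::Tensor m = at::ones({3, 3});
//   at::Tensor lo = at::tril(m);             // upper triangle zeroed
//   at::Tensor idx = at::tril_indices(3, 3); // shape [2, 6]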
9907// aten::trace(Tensor self) -> Tensor
9908inline at::Tensor trace(const at::Tensor & self) {
9909 return at::_ops::trace::call(self);
9910}
9911
9912// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
9913inline at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes) {
9914 return at::_ops::trace_backward::call(grad, c10::fromIntArrayRefSlow(sizes));
9915}
9916namespace symint {
9917 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
9918 at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes) {
9919 return at::_ops::trace_backward::call(grad, c10::fromIntArrayRefSlow(sizes));
9920 }
9921}
9922
9923// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
9924inline at::Tensor trace_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
9925 return at::_ops::trace_backward::call(grad, sizes);
9926}
9927namespace symint {
9928 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
9929 at::Tensor trace_backward(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
9930 return at::_ops::trace_backward::call(grad, sizes);
9931 }
9932}
9933
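// Usage sketch (editorial illustration): trace sums the main diagonal of a
// 2-D tensor into a 0-dim result.
//
//   at::Tensor t = at::trace(at::eye(3)); // scalar tensor holding 3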
9934// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9935inline at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9936 return at::_ops::ne_Scalar_out::call(self, other, out);
9937}
9938// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9939inline at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9940 return at::_ops::ne_Scalar_out::call(self, other, out);
9941}
9942
9943// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
9944inline at::Tensor ne(const at::Tensor & self, const at::Scalar & other) {
9945 return at::_ops::ne_Scalar::call(self, other);
9946}
9947
9948// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9949inline at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
9950 return at::_ops::ne_Tensor_out::call(self, other, out);
9951}
9952// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9953inline at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9954 return at::_ops::ne_Tensor_out::call(self, other, out);
9955}
9956
9957// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
9958inline at::Tensor ne(const at::Tensor & self, const at::Tensor & other) {
9959 return at::_ops::ne_Tensor::call(self, other);
9960}
9961
9962// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9963inline at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9964 return at::_ops::not_equal_Scalar_out::call(self, other, out);
9965}
9966// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9967inline at::Tensor & not_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9968 return at::_ops::not_equal_Scalar_out::call(self, other, out);
9969}
9970
9971// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
9972inline at::Tensor not_equal(const at::Tensor & self, const at::Scalar & other) {
9973 return at::_ops::not_equal_Scalar::call(self, other);
9974}
9975
9976// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9977inline at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
9978 return at::_ops::not_equal_Tensor_out::call(self, other, out);
9979}
9980// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
9981inline at::Tensor & not_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
9982 return at::_ops::not_equal_Tensor_out::call(self, other, out);
9983}
9984
9985// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
9986inline at::Tensor not_equal(const at::Tensor & self, const at::Tensor & other) {
9987 return at::_ops::not_equal_Tensor::call(self, other);
9988}
9989
9990// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9991inline at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
9992 return at::_ops::eq_Scalar_out::call(self, other, out);
9993}
9994// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
9995inline at::Tensor & eq_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
9996 return at::_ops::eq_Scalar_out::call(self, other, out);
9997}
9998
9999// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
10000inline at::Tensor eq(const at::Tensor & self, const at::Scalar & other) {
10001 return at::_ops::eq_Scalar::call(self, other);
10002}
10003
10004// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10005inline at::Tensor & eq_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10006 return at::_ops::eq_Tensor_out::call(self, other, out);
10007}
10008// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10009inline at::Tensor & eq_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10010 return at::_ops::eq_Tensor_out::call(self, other, out);
10011}
10012
10013// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
10014inline at::Tensor eq(const at::Tensor & self, const at::Tensor & other) {
10015 return at::_ops::eq_Tensor::call(self, other);
10016}
10017
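// Usage sketch (editorial illustration): the comparison family (ne/eq/ge/le/
// gt/lt and their spelled-out aliases) broadcasts and returns bool tensors.
//
//   at::Tensor a = at::tensor(c10::IntArrayRef{1, 2, 3});
//   at::Tensor b = at::tensor(c10::IntArrayRef{1, 0, 3});
//   at::Tensor m = at::eq(a, b); // {true, false, true}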
10018// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10019inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10020 return at::_ops::ge_Scalar_out::call(self, other, out);
10021}
10022// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10023inline at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10024 return at::_ops::ge_Scalar_out::call(self, other, out);
10025}
10026
10027// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
10028inline at::Tensor ge(const at::Tensor & self, const at::Scalar & other) {
10029 return at::_ops::ge_Scalar::call(self, other);
10030}
10031
10032// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10033inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10034 return at::_ops::ge_Tensor_out::call(self, other, out);
10035}
10036// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10037inline at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10038 return at::_ops::ge_Tensor_out::call(self, other, out);
10039}
10040
10041// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
10042inline at::Tensor ge(const at::Tensor & self, const at::Tensor & other) {
10043 return at::_ops::ge_Tensor::call(self, other);
10044}
10045
10046// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10047inline at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10048 return at::_ops::greater_equal_Scalar_out::call(self, other, out);
10049}
10050// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10051inline at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10052 return at::_ops::greater_equal_Scalar_out::call(self, other, out);
10053}
10054
10055// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
10056inline at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other) {
10057 return at::_ops::greater_equal_Scalar::call(self, other);
10058}
10059
10060// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10061inline at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10062 return at::_ops::greater_equal_Tensor_out::call(self, other, out);
10063}
10064// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10065inline at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10066 return at::_ops::greater_equal_Tensor_out::call(self, other, out);
10067}
10068
10069// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
10070inline at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other) {
10071 return at::_ops::greater_equal_Tensor::call(self, other);
10072}
10073
10074// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10075inline at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10076 return at::_ops::le_Scalar_out::call(self, other, out);
10077}
10078// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10079inline at::Tensor & le_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10080 return at::_ops::le_Scalar_out::call(self, other, out);
10081}
10082
10083// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
10084inline at::Tensor le(const at::Tensor & self, const at::Scalar & other) {
10085 return at::_ops::le_Scalar::call(self, other);
10086}
10087
10088// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10089inline at::Tensor & le_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10090 return at::_ops::le_Tensor_out::call(self, other, out);
10091}
10092// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10093inline at::Tensor & le_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10094 return at::_ops::le_Tensor_out::call(self, other, out);
10095}
10096
10097// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
10098inline at::Tensor le(const at::Tensor & self, const at::Tensor & other) {
10099 return at::_ops::le_Tensor::call(self, other);
10100}
10101
10102// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10103inline at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10104 return at::_ops::less_equal_Scalar_out::call(self, other, out);
10105}
10106// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10107inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10108 return at::_ops::less_equal_Scalar_out::call(self, other, out);
10109}
10110
10111// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
10112inline at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other) {
10113 return at::_ops::less_equal_Scalar::call(self, other);
10114}
10115
10116// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10117inline at::Tensor & less_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10118 return at::_ops::less_equal_Tensor_out::call(self, other, out);
10119}
10120// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10121inline at::Tensor & less_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10122 return at::_ops::less_equal_Tensor_out::call(self, other, out);
10123}
10124
10125// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
10126inline at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other) {
10127 return at::_ops::less_equal_Tensor::call(self, other);
10128}
10129
10130// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10131inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10132 return at::_ops::gt_Scalar_out::call(self, other, out);
10133}
10134// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10135inline at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10136 return at::_ops::gt_Scalar_out::call(self, other, out);
10137}
10138
10139// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
10140inline at::Tensor gt(const at::Tensor & self, const at::Scalar & other) {
10141 return at::_ops::gt_Scalar::call(self, other);
10142}
10143
10144// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10145inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10146 return at::_ops::gt_Tensor_out::call(self, other, out);
10147}
10148// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10149inline at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10150 return at::_ops::gt_Tensor_out::call(self, other, out);
10151}
10152
10153// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
10154inline at::Tensor gt(const at::Tensor & self, const at::Tensor & other) {
10155 return at::_ops::gt_Tensor::call(self, other);
10156}
10157
10158// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10159inline at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10160 return at::_ops::greater_Scalar_out::call(self, other, out);
10161}
10162// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10163inline at::Tensor & greater_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10164 return at::_ops::greater_Scalar_out::call(self, other, out);
10165}
10166
10167// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
10168inline at::Tensor greater(const at::Tensor & self, const at::Scalar & other) {
10169 return at::_ops::greater_Scalar::call(self, other);
10170}
10171
10172// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10173inline at::Tensor & greater_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10174 return at::_ops::greater_Tensor_out::call(self, other, out);
10175}
10176// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10177inline at::Tensor & greater_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10178 return at::_ops::greater_Tensor_out::call(self, other, out);
10179}
10180
10181// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
10182inline at::Tensor greater(const at::Tensor & self, const at::Tensor & other) {
10183 return at::_ops::greater_Tensor::call(self, other);
10184}
10185
10186// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10187inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10188 return at::_ops::lt_Scalar_out::call(self, other, out);
10189}
10190// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10191inline at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10192 return at::_ops::lt_Scalar_out::call(self, other, out);
10193}
10194
10195// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
10196inline at::Tensor lt(const at::Tensor & self, const at::Scalar & other) {
10197 return at::_ops::lt_Scalar::call(self, other);
10198}
10199
10200// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10201inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10202 return at::_ops::lt_Tensor_out::call(self, other, out);
10203}
10204// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10205inline at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10206 return at::_ops::lt_Tensor_out::call(self, other, out);
10207}
10208
10209// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
10210inline at::Tensor lt(const at::Tensor & self, const at::Tensor & other) {
10211 return at::_ops::lt_Tensor::call(self, other);
10212}
10213
10214// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10215inline at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10216 return at::_ops::less_Scalar_out::call(self, other, out);
10217}
10218// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10219inline at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10220 return at::_ops::less_Scalar_out::call(self, other, out);
10221}
10222
10223// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
10224inline at::Tensor less(const at::Tensor & self, const at::Scalar & other) {
10225 return at::_ops::less_Scalar::call(self, other);
10226}
10227
10228// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10229inline at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10230 return at::_ops::less_Tensor_out::call(self, other, out);
10231}
10232// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10233inline at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10234 return at::_ops::less_Tensor_out::call(self, other, out);
10235}
10236
10237// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
10238inline at::Tensor less(const at::Tensor & self, const at::Tensor & other) {
10239 return at::_ops::less_Tensor::call(self, other);
10240}
10241
10242// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
10243inline at::Tensor & take_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & index) {
10244 return at::_ops::take_out::call(self, index, out);
10245}
10246// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
10247inline at::Tensor & take_outf(const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
10248 return at::_ops::take_out::call(self, index, out);
10249}
10250
10251// aten::take(Tensor self, Tensor index) -> Tensor
10252inline at::Tensor take(const at::Tensor & self, const at::Tensor & index) {
10253 return at::_ops::take::call(self, index);
10254}
10255
10256// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
10257inline at::Tensor & take_along_dim_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim=c10::nullopt) {
10258 return at::_ops::take_along_dim_out::call(self, indices, dim, out);
10259}
10260// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
10261inline at::Tensor & take_along_dim_outf(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) {
10262 return at::_ops::take_along_dim_out::call(self, indices, dim, out);
10263}
10264
10265// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
10266inline at::Tensor take_along_dim(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim=c10::nullopt) {
10267 return at::_ops::take_along_dim::call(self, indices, dim);
10268}
10269
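// Usage sketch (editorial illustration): take indexes self as if it were
// flattened, whereas take_along_dim matches the indices' shape along one dim.
//
//   at::Tensor x = at::tensor(c10::IntArrayRef{10, 20, 30, 40}).view({2, 2});
//   at::Tensor flat = at::take(x, at::tensor(c10::IntArrayRef{3, 0})); // {40, 10}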
10270// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
10271inline at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) {
10272 return at::_ops::index_select_out::call(self, dim, index, out);
10273}
10274// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
10275inline at::Tensor & index_select_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
10276 return at::_ops::index_select_out::call(self, dim, index, out);
10277}
10278
10279// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
10280inline at::Tensor index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
10281 return at::_ops::index_select::call(self, dim, index);
10282}
10283
10284// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
10285inline at::Tensor & index_select_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
10286 return at::_ops::index_select_dimname_out::call(self, dim, index, out);
10287}
10288// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
10289inline at::Tensor & index_select_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
10290 return at::_ops::index_select_dimname_out::call(self, dim, index, out);
10291}
10292
10293// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
10294inline at::Tensor index_select(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
10295 return at::_ops::index_select_dimname::call(self, dim, index);
10296}
10297
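// `index_select` picks whole slices of `self` along `dim` at the positions
// given by a 1-D integer `index` tensor. A minimal sketch (shapes are
// illustrative):
//
//   at::Tensor t = at::rand({4, 3});
//   at::Tensor idx = at::tensor({0, 2}, at::kLong);        // int64 indices
//   at::Tensor rows = at::index_select(t, /*dim=*/0, idx); // shape {2, 3}
//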
10298// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
10299inline at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
10300 return at::_ops::index_select_backward::call(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index);
10301}
10302namespace symint {
10303 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
10304 at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
10305 return at::_ops::index_select_backward::call(grad, c10::fromIntArrayRefSlow(self_sizes), dim, index);
10306 }
10307}
10308
10309// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor
10310inline at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
10311 return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
10312}
10313namespace symint {
10314 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
10315 at::Tensor index_select_backward(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
10316 return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
10317 }
10318}
10319
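// The `at::symint` templates above let generic code choose between the
// concrete-int and symbolic-int overloads via an explicit template argument;
// both forward to the same underlying op. A sketch, assuming suitable `grad`,
// sizes, `dim`, and `index` values are in scope:
//
//   at::symint::index_select_backward<int64_t>(grad, self_sizes, dim, index);
//   at::symint::index_select_backward<c10::SymInt>(grad, sym_sizes, dim, index);
//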
10320// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
10321inline at::Tensor & masked_select_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) {
10322 return at::_ops::masked_select_out::call(self, mask, out);
10323}
10324// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
10325inline at::Tensor & masked_select_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
10326 return at::_ops::masked_select_out::call(self, mask, out);
10327}
10328
10329// aten::masked_select(Tensor self, Tensor mask) -> Tensor
10330inline at::Tensor masked_select(const at::Tensor & self, const at::Tensor & mask) {
10331 return at::_ops::masked_select::call(self, mask);
10332}
10333
10334// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
10335inline at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
10336 return at::_ops::masked_select_backward::call(grad, input, mask);
10337}
10338
10339// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10340inline at::Tensor & nonzero_out(at::Tensor & out, const at::Tensor & self) {
10341 return at::_ops::nonzero_out::call(self, out);
10342}
10343// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10344inline at::Tensor & nonzero_outf(const at::Tensor & self, at::Tensor & out) {
10345 return at::_ops::nonzero_out::call(self, out);
10346}
10347
10348// aten::nonzero(Tensor self) -> Tensor
10349inline at::Tensor nonzero(const at::Tensor & self) {
10350 return at::_ops::nonzero::call(self);
10351}
10352
10353// aten::nonzero_numpy(Tensor self) -> Tensor[]
10354inline ::std::vector<at::Tensor> nonzero_numpy(const at::Tensor & self) {
10355 return at::_ops::nonzero_numpy::call(self);
10356}
10357
10358// aten::argwhere(Tensor self) -> Tensor
10359inline at::Tensor argwhere(const at::Tensor & self) {
10360 return at::_ops::argwhere::call(self);
10361}
10362
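// `nonzero` returns the indices of non-zero elements as a single 2-D tensor
// of shape {n_nonzero, ndim}; `nonzero_numpy` returns the same information as
// one 1-D tensor per input dimension (NumPy convention); `argwhere` matches
// `nonzero`. A minimal sketch:
//
//   at::Tensor m = at::tensor({0, 1, 0, 2});
//   at::Tensor idx = at::nonzero(m);      // shape {2, 1}: rows [1], [3]
//   auto per_dim = at::nonzero_numpy(m);  // vector with one 1-D index tensor
//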
10363// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
10364inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) {
10365 return at::_ops::gather_out::call(self, dim, index, sparse_grad, out);
10366}
10367// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
10368inline at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
10369 return at::_ops::gather_out::call(self, dim, index, sparse_grad, out);
10370}
10371
10372// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
10373inline at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) {
10374 return at::_ops::gather::call(self, dim, index, sparse_grad);
10375}
10376
10377// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
10378inline at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
10379 return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
10380}
10381
10382// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
10383inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) {
10384 return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out);
10385}
10386// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
10387inline at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
10388 return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out);
10389}
10390
10391// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
10392inline at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) {
10393 return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
10394}
10395
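// `gather` indexes element-wise along `dim`: for a 2-D input with dim=1,
// out[i][j] = self[i][index[i][j]], where `index` is an int64 tensor with the
// same rank as `self`. A minimal sketch (shapes are illustrative):
//
//   at::Tensor t = at::rand({2, 3});
//   at::Tensor idx = at::tensor({0, 0, 2, 1}, at::kLong).reshape({2, 2});
//   at::Tensor g = at::gather(t, /*dim=*/1, idx);  // g[i][j] = t[i][idx[i][j]]
//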
10396// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
10397inline at::Tensor _gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
10398 return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
10399}
10400
10401// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
10402inline at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
10403 return at::_ops::addcmul_out::call(self, tensor1, tensor2, value, out);
10404}
10405// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
10406inline at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
10407 return at::_ops::addcmul_out::call(self, tensor1, tensor2, value, out);
10408}
10409
10410// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
10411inline at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
10412 return at::_ops::addcmul::call(self, tensor1, tensor2, value);
10413}
10414
10415// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
10416inline at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
10417 return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out);
10418}
10419// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
10420inline at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
10421 return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out);
10422}
10423
10424// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
10425inline at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
10426 return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
10427}
10428
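// `addcmul` computes self + value * tensor1 * tensor2 and `addcdiv` computes
// self + value * tensor1 / tensor2, all element-wise. A minimal sketch
// (shapes are illustrative):
//
//   at::Tensor s = at::zeros({3});
//   at::Tensor t1 = at::rand({3});
//   at::Tensor t2 = at::rand({3});
//   at::Tensor r = at::addcmul(s, t1, t2, /*value=*/0.5);  // 0.5 * t1 * t2
//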
10429// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
10430inline at::Tensor cross_entropy_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100, double label_smoothing=0.0) {
10431 return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
10432}
10433namespace symint {
10434 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
10435 at::Tensor cross_entropy_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100, double label_smoothing=0.0) {
10436 return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
10437 }
10438}
10439
10440// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor
10441inline at::Tensor cross_entropy_loss_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100, double label_smoothing=0.0) {
10442 return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
10443}
10444namespace symint {
10445 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
10446 at::Tensor cross_entropy_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100, double label_smoothing=0.0) {
10447 return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
10448 }
10449}
10450
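// `cross_entropy_loss` fuses log_softmax and nll_loss over class logits.
// A minimal sketch, assuming a batch of 4 with 10 classes:
//
//   at::Tensor logits = at::randn({4, 10});
//   at::Tensor target = at::randint(/*high=*/10, {4});  // int64 class indices
//   at::Tensor loss = at::cross_entropy_loss(logits, target);
//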
10451// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
10452inline ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) {
10453 return at::_ops::triangular_solve_X::call(self, A, upper, transpose, unitriangular, X, M);
10454}
10455// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
10456inline ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
10457 return at::_ops::triangular_solve_X::call(self, A, upper, transpose, unitriangular, X, M);
10458}
10459
10460// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
10461inline ::std::tuple<at::Tensor,at::Tensor> triangular_solve(const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) {
10462 return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular);
10463}
10464
10465// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
10466inline void _linalg_check_errors(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
10467 return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
10468}
10469
10470// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
10471inline at::Tensor & linalg_solve_triangular_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) {
10472 return at::_ops::linalg_solve_triangular_out::call(self, B, upper, left, unitriangular, out);
10473}
10474// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
10475inline at::Tensor & linalg_solve_triangular_outf(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
10476 return at::_ops::linalg_solve_triangular_out::call(self, B, upper, left, unitriangular, out);
10477}
10478
10479// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
10480inline at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) {
10481 return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular);
10482}
10483
10484// aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor
10485inline at::Tensor linalg_vander(const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt) {
10486 return at::_ops::linalg_vander::call(x, N);
10487}
10488
10489// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
10490inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some=true, bool compute_uv=true) {
10491 return at::_ops::svd_U::call(self, some, compute_uv, U, S, V);
10492}
10493// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
10494inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_outf(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
10495 return at::_ops::svd_U::call(self, some, compute_uv, U, S, V);
10496}
10497
10498// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
10499inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(const at::Tensor & self, bool some=true, bool compute_uv=true) {
10500 return at::_ops::svd::call(self, some, compute_uv);
10501}
10502
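// `svd` returns (U, S, V) with V not transposed, so self ~ U diag(S) V^T;
// some=true yields the reduced decomposition. A minimal sketch (shapes are
// illustrative):
//
//   at::Tensor m = at::randn({5, 3});
//   at::Tensor U, S, V;
//   std::tie(U, S, V) = at::svd(m);
//   // m ~ U.matmul(at::diag(S)).matmul(V.t())
//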
10503// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
10504inline at::Tensor swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1) {
10505 return at::_ops::swapaxes::call(self, axis0, axis1);
10506}
10507
10508// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
10509inline at::Tensor swapdims(const at::Tensor & self, int64_t dim0, int64_t dim1) {
10510 return at::_ops::swapdims::call(self, dim0, dim1);
10511}
10512
10513// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
10514inline at::Tensor & cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false) {
10515 return at::_ops::cholesky_out::call(self, upper, out);
10516}
10517// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
10518inline at::Tensor & cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
10519 return at::_ops::cholesky_out::call(self, upper, out);
10520}
10521
10522// aten::cholesky(Tensor self, bool upper=False) -> Tensor
10523inline at::Tensor cholesky(const at::Tensor & self, bool upper=false) {
10524 return at::_ops::cholesky::call(self, upper);
10525}
10526
10527// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
10528inline at::Tensor & cholesky_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, bool upper=false) {
10529 return at::_ops::cholesky_solve_out::call(self, input2, upper, out);
10530}
10531// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
10532inline at::Tensor & cholesky_solve_outf(const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
10533 return at::_ops::cholesky_solve_out::call(self, input2, upper, out);
10534}
10535
10536// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
10537inline at::Tensor cholesky_solve(const at::Tensor & self, const at::Tensor & input2, bool upper=false) {
10538 return at::_ops::cholesky_solve::call(self, input2, upper);
10539}
10540
10541// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
10542inline at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) {
10543 return at::_ops::_cholesky_solve_helper::call(self, A, upper);
10544}
10545
10546// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
10547inline at::Tensor cholesky_inverse(const at::Tensor & self, bool upper=false) {
10548 return at::_ops::cholesky_inverse::call(self, upper);
10549}
10550
10551// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
10552inline at::Tensor & cholesky_inverse_out(at::Tensor & out, const at::Tensor & self, bool upper=false) {
10553 return at::_ops::cholesky_inverse_out::call(self, upper, out);
10554}
10555// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
10556inline at::Tensor & cholesky_inverse_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
10557 return at::_ops::cholesky_inverse_out::call(self, upper, out);
10558}
10559
10560// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
10561inline ::std::tuple<at::Tensor &,at::Tensor &> qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & self, bool some=true) {
10562 return at::_ops::qr_Q::call(self, some, Q, R);
10563}
10564// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
10565inline ::std::tuple<at::Tensor &,at::Tensor &> qr_outf(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
10566 return at::_ops::qr_Q::call(self, some, Q, R);
10567}
10568
10569// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
10570inline ::std::tuple<at::Tensor,at::Tensor> qr(const at::Tensor & self, bool some=true) {
10571 return at::_ops::qr::call(self, some);
10572}
10573
10574// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
10575inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self) {
10576 return at::_ops::geqrf_a::call(self, a, tau);
10577}
10578// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
10579inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
10580 return at::_ops::geqrf_a::call(self, a, tau);
10581}
10582
10583// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
10584inline ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self) {
10585 return at::_ops::geqrf::call(self);
10586}
10587
10588// aten::orgqr(Tensor self, Tensor input2) -> Tensor
10589inline at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2) {
10590 return at::_ops::orgqr::call(self, input2);
10591}
10592
10593// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
10594inline at::Tensor & orgqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2) {
10595 return at::_ops::orgqr_out::call(self, input2, out);
10596}
10597// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
10598inline at::Tensor & orgqr_outf(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
10599 return at::_ops::orgqr_out::call(self, input2, out);
10600}
10601
10602// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
10603inline at::Tensor & ormqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) {
10604 return at::_ops::ormqr_out::call(self, input2, input3, left, transpose, out);
10605}
10606// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
10607inline at::Tensor & ormqr_outf(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
10608 return at::_ops::ormqr_out::call(self, input2, input3, left, transpose, out);
10609}
10610
10611// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
10612inline at::Tensor ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) {
10613 return at::_ops::ormqr::call(self, input2, input3, left, transpose);
10614}
10615
10616// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
10617inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info(const at::Tensor & self, bool pivot=true, bool check_errors=true) {
10618 return at::_ops::_lu_with_info::call(self, pivot, check_errors);
10619}
10620
10621// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
10622inline at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
10623 return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out);
10624}
10625// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
10626inline at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
10627 return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out);
10628}
10629
10630// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
10631inline at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
10632 return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
10633}
10634
10635// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
10636inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) {
10637 return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots);
10638}
10639
10640// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
10641inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) {
10642 return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
10643}
10644// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
10645inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
10646 return at::_ops::lu_unpack_out::call(LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U);
10647}
10648
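// A typical LU round trip: factor once with `_lu_with_info`, then reuse the
// factors to solve against one or more right-hand sides. A minimal sketch
// (shapes are illustrative):
//
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor B = at::randn({3, 2});
//   at::Tensor LU, pivots, info;
//   std::tie(LU, pivots, info) = at::_lu_with_info(A);
//   at::Tensor X = at::lu_solve(B, LU, pivots);  // solves A X = B
//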
10649// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
10650inline at::Tensor & multinomial_out(at::Tensor & out, const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt) {
10651 return at::_ops::multinomial_out::call(self, num_samples, replacement, generator, out);
10652}
10653// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
10654inline at::Tensor & multinomial_outf(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) {
10655 return at::_ops::multinomial_out::call(self, num_samples, replacement, generator, out);
10656}
10657
10658// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
10659inline at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt) {
10660 return at::_ops::multinomial::call(self, num_samples, replacement, generator);
10661}
10662
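// `multinomial` draws `num_samples` indices from the (not necessarily
// normalized) weights in `self`; pass a Generator for reproducible sampling.
// A minimal sketch (weights are illustrative):
//
//   at::Tensor w = at::tensor({0.1, 0.2, 0.7});
//   at::Tensor idx = at::multinomial(w, /*num_samples=*/2);  // no replacement
//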
10663// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10664inline at::Tensor & lgamma_out(at::Tensor & out, const at::Tensor & self) {
10665 return at::_ops::lgamma_out::call(self, out);
10666}
10667// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10668inline at::Tensor & lgamma_outf(const at::Tensor & self, at::Tensor & out) {
10669 return at::_ops::lgamma_out::call(self, out);
10670}
10671
10672// aten::lgamma(Tensor self) -> Tensor
10673inline at::Tensor lgamma(const at::Tensor & self) {
10674 return at::_ops::lgamma::call(self);
10675}
10676
10677// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10678inline at::Tensor & digamma_out(at::Tensor & out, const at::Tensor & self) {
10679 return at::_ops::digamma_out::call(self, out);
10680}
10681// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10682inline at::Tensor & digamma_outf(const at::Tensor & self, at::Tensor & out) {
10683 return at::_ops::digamma_out::call(self, out);
10684}
10685
10686// aten::digamma(Tensor self) -> Tensor
10687inline at::Tensor digamma(const at::Tensor & self) {
10688 return at::_ops::digamma::call(self);
10689}
10690
10691// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10692inline at::Tensor & polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) {
10693 return at::_ops::polygamma_out::call(n, self, out);
10694}
10695// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10696inline at::Tensor & polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) {
10697 return at::_ops::polygamma_out::call(n, self, out);
10698}
10699
10700// aten::polygamma(int n, Tensor self) -> Tensor
10701inline at::Tensor polygamma(int64_t n, const at::Tensor & self) {
10702 return at::_ops::polygamma::call(n, self);
10703}
10704
10705// aten::erfinv(Tensor self) -> Tensor
10706inline at::Tensor erfinv(const at::Tensor & self) {
10707 return at::_ops::erfinv::call(self);
10708}
10709
10710// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10711inline at::Tensor & erfinv_out(at::Tensor & out, const at::Tensor & self) {
10712 return at::_ops::erfinv_out::call(self, out);
10713}
10714// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10715inline at::Tensor & erfinv_outf(const at::Tensor & self, at::Tensor & out) {
10716 return at::_ops::erfinv_out::call(self, out);
10717}
10718
10719// aten::i0(Tensor self) -> Tensor
10720inline at::Tensor i0(const at::Tensor & self) {
10721 return at::_ops::i0::call(self);
10722}
10723
10724// aten::i0_(Tensor(a!) self) -> Tensor(a!)
10725inline at::Tensor & i0_(at::Tensor & self) {
10726 return at::_ops::i0_::call(self);
10727}
10728
10729// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10730inline at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self) {
10731 return at::_ops::i0_out::call(self, out);
10732}
10733// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10734inline at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out) {
10735 return at::_ops::i0_out::call(self, out);
10736}
10737
10738// aten::sign(Tensor self) -> Tensor
10739inline at::Tensor sign(const at::Tensor & self) {
10740 return at::_ops::sign::call(self);
10741}
10742
10743// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10744inline at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self) {
10745 return at::_ops::sign_out::call(self, out);
10746}
10747// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10748inline at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out) {
10749 return at::_ops::sign_out::call(self, out);
10750}
10751
10752// aten::signbit(Tensor self) -> Tensor
10753inline at::Tensor signbit(const at::Tensor & self) {
10754 return at::_ops::signbit::call(self);
10755}
10756
10757// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10758inline at::Tensor & signbit_out(at::Tensor & out, const at::Tensor & self) {
10759 return at::_ops::signbit_out::call(self, out);
10760}
10761// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
10762inline at::Tensor & signbit_outf(const at::Tensor & self, at::Tensor & out) {
10763 return at::_ops::signbit_out::call(self, out);
10764}
10765
10766// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
10767inline at::Tensor dist(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p=2) {
10768 return at::_ops::dist::call(self, other, p);
10769}
10770
10771// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10772inline at::Tensor & atan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10773 return at::_ops::atan2_out::call(self, other, out);
10774}
10775// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10776inline at::Tensor & atan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10777 return at::_ops::atan2_out::call(self, other, out);
10778}
10779
10780// aten::atan2(Tensor self, Tensor other) -> Tensor
10781inline at::Tensor atan2(const at::Tensor & self, const at::Tensor & other) {
10782 return at::_ops::atan2::call(self, other);
10783}
10784
10785// aten::arctan2(Tensor self, Tensor other) -> Tensor
10786inline at::Tensor arctan2(const at::Tensor & self, const at::Tensor & other) {
10787 return at::_ops::arctan2::call(self, other);
10788}
10789
10790// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10791inline at::Tensor & arctan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10792 return at::_ops::arctan2_out::call(self, other, out);
10793}
10794// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10795inline at::Tensor & arctan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10796 return at::_ops::arctan2_out::call(self, other, out);
10797}
10798
10799// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
10800inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
10801 return at::_ops::lerp_Scalar_out::call(self, end, weight, out);
10802}
10803// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
10804inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
10805 return at::_ops::lerp_Scalar_out::call(self, end, weight, out);
10806}
10807
10808// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
10809inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
10810 return at::_ops::lerp_Tensor_out::call(self, end, weight, out);
10811}
10812// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
10813inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
10814 return at::_ops::lerp_Tensor_out::call(self, end, weight, out);
10815}
10816
10817// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor
10818inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
10819 return at::_ops::lerp_Scalar::call(self, end, weight);
10820}
10821
10822// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor
10823inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
10824 return at::_ops::lerp_Tensor::call(self, end, weight);
10825}
10826
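// `lerp` computes self + weight * (end - self); the weight may be a Scalar
// applied uniformly or a Tensor broadcast element-wise. A minimal sketch:
//
//   at::Tensor start = at::zeros({3});
//   at::Tensor end = at::ones({3});
//   at::Tensor mid = at::lerp(start, end, 0.5);        // all 0.5
//   at::Tensor w = at::tensor({0.0f, 0.5f, 1.0f});
//   at::Tensor ramp = at::lerp(start, end, w);         // 0, 0.5, 1
//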
10827// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
10828inline at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) {
10829 return at::_ops::histc_out::call(self, bins, min, max, out);
10830}
10831// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
10832inline at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
10833 return at::_ops::histc_out::call(self, bins, min, max, out);
10834}
10835
10836// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
10837inline at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) {
10838 return at::_ops::histc::call(self, bins, min, max);
10839}
10840
10841// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
10842inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10843 return at::_ops::histogram_bins_tensor_out::call(self, bins, weight, density, hist, bin_edges);
10844}
10845// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
10846inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
10847 return at::_ops::histogram_bins_tensor_out::call(self, bins, weight, density, hist, bin_edges);
10848}
10849
10850// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
10851inline ::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10852 return at::_ops::histogram_bins_tensor::call(self, bins, weight, density);
10853}
10854
10855// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
10856inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10857 return at::_ops::histogram_bin_ct_out::call(self, bins, range, weight, density, hist, bin_edges);
10858}
10859// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
10860inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
10861 return at::_ops::histogram_bin_ct_out::call(self, bins, range, weight, density, hist, bin_edges);
10862}
10863
10864// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
10865inline ::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10866 return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
10867}
10868
10869// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
10870inline ::std::vector<at::Tensor> _histogramdd_bin_edges(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10871 return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
10872}
10873
10874// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
10875inline at::Tensor _histogramdd_from_bin_cts(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10876 return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density);
10877}
10878
10879// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
10880inline at::Tensor _histogramdd_from_bin_tensors(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10881 return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
10882}
10883
10884// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
10885inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10886 return at::_ops::histogramdd::call(self, bins, range, weight, density);
10887}
10888
10889// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
10890inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10891 return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
10892}
10893
10894// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)
10895inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
10896 return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
10897}
10898
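// The histogram family: `histc` buckets values into `bins` equal-width bins
// between min and max, while `histogram`/`histogramdd` also return the bin
// edges and support explicit edges, weights, and density normalization.
// A minimal sketch:
//
//   at::Tensor x = at::randn({100});
//   at::Tensor hist, edges;
//   std::tie(hist, edges) = at::histogram(x, /*bins=*/10);
//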
10899// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10900inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10901 return at::_ops::fmod_Scalar_out::call(self, other, out);
10902}
10903// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10904inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10905 return at::_ops::fmod_Scalar_out::call(self, other, out);
10906}
10907
10908// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor
10909inline at::Tensor fmod(const at::Tensor & self, const at::Scalar & other) {
10910 return at::_ops::fmod_Scalar::call(self, other);
10911}
10912
10913// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10914inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10915 return at::_ops::fmod_Tensor_out::call(self, other, out);
10916}
10917// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10918inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10919 return at::_ops::fmod_Tensor_out::call(self, other, out);
10920}
10921
10922// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor
10923inline at::Tensor fmod(const at::Tensor & self, const at::Tensor & other) {
10924 return at::_ops::fmod_Tensor::call(self, other);
10925}
10926
10927// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10928inline at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10929 return at::_ops::hypot_out::call(self, other, out);
10930}
10931// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10932inline at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10933 return at::_ops::hypot_out::call(self, other, out);
10934}
10935
10936// aten::hypot(Tensor self, Tensor other) -> Tensor
10937inline at::Tensor hypot(const at::Tensor & self, const at::Tensor & other) {
10938 return at::_ops::hypot::call(self, other);
10939}
10940
10941// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10942inline at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10943 return at::_ops::igamma_out::call(self, other, out);
10944}
10945// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10946inline at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10947 return at::_ops::igamma_out::call(self, other, out);
10948}
10949
10950// aten::igamma(Tensor self, Tensor other) -> Tensor
10951inline at::Tensor igamma(const at::Tensor & self, const at::Tensor & other) {
10952 return at::_ops::igamma::call(self, other);
10953}
10954
10955// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10956inline at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10957 return at::_ops::igammac_out::call(self, other, out);
10958}
10959// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10960inline at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10961 return at::_ops::igammac_out::call(self, other, out);
10962}
10963
10964// aten::igammac(Tensor self, Tensor other) -> Tensor
10965inline at::Tensor igammac(const at::Tensor & self, const at::Tensor & other) {
10966 return at::_ops::igammac::call(self, other);
10967}
10968
10969// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10970inline at::Tensor & nextafter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10971 return at::_ops::nextafter_out::call(self, other, out);
10972}
10973// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10974inline at::Tensor & nextafter_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
10975 return at::_ops::nextafter_out::call(self, other, out);
10976}
10977
10978// aten::nextafter(Tensor self, Tensor other) -> Tensor
10979inline at::Tensor nextafter(const at::Tensor & self, const at::Tensor & other) {
10980 return at::_ops::nextafter::call(self, other);
10981}
10982
10983// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10984inline at::Tensor & remainder_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
10985 return at::_ops::remainder_Scalar_out::call(self, other, out);
10986}
10987// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
10988inline at::Tensor & remainder_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
10989 return at::_ops::remainder_Scalar_out::call(self, other, out);
10990}
10991
10992// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor
10993inline at::Tensor remainder(const at::Tensor & self, const at::Scalar & other) {
10994 return at::_ops::remainder_Scalar::call(self, other);
10995}
10996
10997// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
10998inline at::Tensor & remainder_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
10999 return at::_ops::remainder_Tensor_out::call(self, other, out);
11000}
11001// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11002inline at::Tensor & remainder_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11003 return at::_ops::remainder_Tensor_out::call(self, other, out);
11004}
11005
11006// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor
11007inline at::Tensor remainder(const at::Tensor & self, const at::Tensor & other) {
11008 return at::_ops::remainder_Tensor::call(self, other);
11009}
11010
11011// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
11012inline at::Tensor remainder(const at::Scalar & self, const at::Tensor & other) {
11013 return at::_ops::remainder_Scalar_Tensor::call(self, other);
11014}
11015
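// `fmod` and `remainder` differ only in sign convention: `fmod` follows C's
// fmod (the result takes the sign of self), `remainder` follows Python's %
// (the result takes the sign of other). A minimal sketch:
//
//   at::Tensor a = at::tensor({-3.0});
//   at::fmod(a, 2);       // -1.0, sign of the dividend
//   at::remainder(a, 2);  //  1.0, sign of the divisor
//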
11016// aten::min(Tensor self) -> Tensor
11017inline at::Tensor min(const at::Tensor & self) {
11018 return at::_ops::min::call(self);
11019}
11020
11021// aten::fmin(Tensor self, Tensor other) -> Tensor
11022inline at::Tensor fmin(const at::Tensor & self, const at::Tensor & other) {
11023 return at::_ops::fmin::call(self, other);
11024}
11025
11026// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11027inline at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
11028 return at::_ops::fmin_out::call(self, other, out);
11029}
11030// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11031inline at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11032 return at::_ops::fmin_out::call(self, other, out);
11033}
11034
11035// aten::max(Tensor self) -> Tensor
11036inline at::Tensor max(const at::Tensor & self) {
11037 return at::_ops::max::call(self);
11038}
11039
11040// aten::fmax(Tensor self, Tensor other) -> Tensor
11041inline at::Tensor fmax(const at::Tensor & self, const at::Tensor & other) {
11042 return at::_ops::fmax::call(self, other);
11043}
11044
11045// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11046inline at::Tensor & fmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
11047 return at::_ops::fmax_out::call(self, other, out);
11048}
11049// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11050inline at::Tensor & fmax_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11051 return at::_ops::fmax_out::call(self, other, out);
11052}
11053
11054// aten::maximum(Tensor self, Tensor other) -> Tensor
11055inline at::Tensor maximum(const at::Tensor & self, const at::Tensor & other) {
11056 return at::_ops::maximum::call(self, other);
11057}
11058
11059// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11060inline at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
11061 return at::_ops::maximum_out::call(self, other, out);
11062}
11063// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11064inline at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11065 return at::_ops::maximum_out::call(self, other, out);
11066}
11067
11068// aten::max.other(Tensor self, Tensor other) -> Tensor
11069inline at::Tensor max(const at::Tensor & self, const at::Tensor & other) {
11070 return at::_ops::max_other::call(self, other);
11071}
11072
11073// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11074inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
11075 return at::_ops::max_out::call(self, other, out);
11076}
11077// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11078inline at::Tensor & max_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11079 return at::_ops::max_out::call(self, other, out);
11080}
11081
11082// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11083inline at::Tensor & max_out(at::Tensor & out, const at::Tensor & self) {
11084 return at::_ops::max_unary_out::call(self, out);
11085}
11086// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11087inline at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out) {
11088 return at::_ops::max_unary_out::call(self, out);
11089}
11090
11091// aten::minimum(Tensor self, Tensor other) -> Tensor
11092inline at::Tensor minimum(const at::Tensor & self, const at::Tensor & other) {
11093 return at::_ops::minimum::call(self, other);
11094}
11095
11096// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11097inline at::Tensor & minimum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
11098 return at::_ops::minimum_out::call(self, other, out);
11099}
11100// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11101inline at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11102 return at::_ops::minimum_out::call(self, other, out);
11103}
11104
11105// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11106inline at::Tensor & min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
11107 return at::_ops::min_out::call(self, other, out);
11108}
11109// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
11110inline at::Tensor & min_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
11111 return at::_ops::min_out::call(self, other, out);
11112}
11113
11114// aten::min.other(Tensor self, Tensor other) -> Tensor
11115inline at::Tensor min(const at::Tensor & self, const at::Tensor & other) {
11116 return at::_ops::min_other::call(self, other);
11117}
11118
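// Element-wise min/max come in two NaN flavors: `minimum`/`maximum` (and the
// binary `min`/`max`) propagate NaNs, while `fmin`/`fmax` follow C++
// std::fmin/std::fmax and return the non-NaN operand when exactly one input
// is NaN. A minimal sketch (requires <cmath> for std::nan):
//
//   at::Tensor a = at::tensor({1.0, std::nan("")});
//   at::Tensor b = at::tensor({2.0, 0.0});
//   at::maximum(a, b);  // {2, nan}
//   at::fmax(a, b);     // {2, 0}
//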
11119// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
11120inline at::Tensor quantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11121 return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
11122}
11123
11124// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11125inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11126 return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out);
11127}
11128// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11129inline at::Tensor & quantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
11130 return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out);
11131}
11132
11133// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
11134inline at::Tensor quantile(const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11135 return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
11136}
11137
11138// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11139inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11140 return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
11141}
11142// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11143inline at::Tensor & quantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
11144 return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
11145}
11146
11147// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
11148inline at::Tensor nanquantile(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11149 return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
11150}
11151
11152// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11153inline at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11154 return at::_ops::nanquantile_out::call(self, q, dim, keepdim, interpolation, out);
11155}
11156// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11157inline at::Tensor & nanquantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
11158 return at::_ops::nanquantile_out::call(self, q, dim, keepdim, interpolation, out);
11159}
11160
11161// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
11162inline at::Tensor nanquantile(const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11163 return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
11164}
11165
11166// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11167inline at::Tensor & nanquantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") {
11168 return at::_ops::nanquantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
11169}
11170// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
11171inline at::Tensor & nanquantile_outf(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
11172 return at::_ops::nanquantile_scalar_out::call(self, q, dim, keepdim, interpolation, out);
11173}
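
// Example (illustrative, not from the generated source): `nanquantile`
// behaves like `quantile` but ignores NaN entries, as if they had been
// removed before the quantile is computed. Assuming `t` is a 1-D
// floating-point tensor that may contain NaNs:
//
//   at::Tensor med = at::nanquantile(t, /*q=*/0.5);   // NaN-ignoring median
//   at::Tensor q90 = at::nanquantile(t, 0.9, /*dim=*/0, /*keepdim=*/false, "linear");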
11174
11175// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11176inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim=-1, bool descending=false) {
11177 return at::_ops::sort_values::call(self, dim, descending, values, indices);
11178}
11179// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11180inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
11181 return at::_ops::sort_values::call(self, dim, descending, values, indices);
11182}
11183
11184// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11185inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false) {
11186 return at::_ops::sort_values_stable::call(self, stable, dim, descending, values, indices);
11187}
11188// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11189inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
11190 return at::_ops::sort_values_stable::call(self, stable, dim, descending, values, indices);
11191}
11192
11193// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
11194inline ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, int64_t dim=-1, bool descending=false) {
11195 return at::_ops::sort::call(self, dim, descending);
11196}
11197
11198// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
11199inline ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false) {
11200 return at::_ops::sort_stable::call(self, stable, dim, descending);
11201}
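
// Example (illustrative): the `stable` overload guarantees that equal
// elements keep their relative order in the output. The returned tuple can
// be unpacked with structured bindings; `t` is an assumed existing tensor:
//
//   auto [values, indices] = at::sort(t, /*stable=*/true, /*dim=*/-1,
//                                     /*descending=*/false);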
11202
11203// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11204inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool descending=false) {
11205 return at::_ops::sort_dimname_values::call(self, dim, descending, values, indices);
11206}
11207// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11208inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
11209 return at::_ops::sort_dimname_values::call(self, dim, descending, values, indices);
11210}
11211
11212// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11213inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending=false) {
11214 return at::_ops::sort_dimname_values_stable::call(self, stable, dim, descending, values, indices);
11215}
11216// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11217inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
11218 return at::_ops::sort_dimname_values_stable::call(self, stable, dim, descending, values, indices);
11219}
11220
11221// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
11222inline ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, at::Dimname dim, bool descending=false) {
11223 return at::_ops::sort_dimname::call(self, dim, descending);
11224}
11225
11226// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)
11227inline ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending=false) {
11228 return at::_ops::sort_dimname_stable::call(self, stable, dim, descending);
11229}
11230
11231// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11232inline at::Tensor & msort_out(at::Tensor & out, const at::Tensor & self) {
11233 return at::_ops::msort_out::call(self, out);
11234}
11235// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11236inline at::Tensor & msort_outf(const at::Tensor & self, at::Tensor & out) {
11237 return at::_ops::msort_out::call(self, out);
11238}
11239
11240// aten::msort(Tensor self) -> Tensor
11241inline at::Tensor msort(const at::Tensor & self) {
11242 return at::_ops::msort::call(self);
11243}
11244
11245// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor
11246inline at::Tensor argsort(const at::Tensor & self, int64_t dim=-1, bool descending=false) {
11247 return at::_ops::argsort::call(self, dim, descending);
11248}
11249
11250// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor
11251inline at::Tensor argsort(const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false) {
11252 return at::_ops::argsort_stable::call(self, stable, dim, descending);
11253}
11254
11255// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor
11256inline at::Tensor argsort(const at::Tensor & self, at::Dimname dim, bool descending=false) {
11257 return at::_ops::argsort_dimname::call(self, dim, descending);
11258}
11259
11260// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11261inline ::std::tuple<at::Tensor &,at::Tensor &> topk_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) {
11262 return at::_ops::topk_values::call(self, k, dim, largest, sorted, values, indices);
11263}
11264// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
11265inline ::std::tuple<at::Tensor &,at::Tensor &> topk_outf(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
11266 return at::_ops::topk_values::call(self, k, dim, largest, sorted, values, indices);
11267}
11268
11269// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
11270inline ::std::tuple<at::Tensor,at::Tensor> topk(const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) {
11271 return at::_ops::topk::call(self, k, dim, largest, sorted);
11272}
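
// Example (illustrative): `topk` returns the `k` largest (or smallest, with
// largest=false) entries along `dim`, together with their indices. Assuming
// an existing tensor `t`:
//
//   auto [vals, idxs] = at::topk(t, /*k=*/3);                       // 3 largest, last dim
//   auto [small, si]  = at::topk(t, 3, /*dim=*/0, /*largest=*/false);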
11273
11274// aten::all(Tensor self) -> Tensor
11275inline at::Tensor all(const at::Tensor & self) {
11276 return at::_ops::all::call(self);
11277}
11278
11279// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11280inline at::Tensor & all_out(at::Tensor & out, const at::Tensor & self) {
11281 return at::_ops::all_all_out::call(self, out);
11282}
11283// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11284inline at::Tensor & all_outf(const at::Tensor & self, at::Tensor & out) {
11285 return at::_ops::all_all_out::call(self, out);
11286}
11287
11288// aten::any(Tensor self) -> Tensor
11289inline at::Tensor any(const at::Tensor & self) {
11290 return at::_ops::any::call(self);
11291}
11292
11293// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11294inline at::Tensor & any_out(at::Tensor & out, const at::Tensor & self) {
11295 return at::_ops::any_all_out::call(self, out);
11296}
11297// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
11298inline at::Tensor & any_outf(const at::Tensor & self, at::Tensor & out) {
11299 return at::_ops::any_all_out::call(self, out);
11300}
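
// Example (illustrative): the dimension-less `all`/`any` overloads reduce
// the whole tensor to a single boolean element. Assuming an existing tensor
// `mask`:
//
//   at::Tensor every = at::all(mask);   // true iff every element is nonzero
//   at::Tensor some  = at::any(mask);   // true iff at least one element is nonzero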
11301
11302// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
11303inline at::Tensor & renorm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
11304 return at::_ops::renorm_out::call(self, p, dim, maxnorm, out);
11305}
11306// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
11307inline at::Tensor & renorm_outf(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
11308 return at::_ops::renorm_out::call(self, p, dim, maxnorm, out);
11309}
11310
11311// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
11312inline at::Tensor renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
11313 return at::_ops::renorm::call(self, p, dim, maxnorm);
11314}
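
// Example (illustrative): `renorm` rescales each slice of `self` along `dim`
// whose p-norm exceeds `maxnorm` so that its norm equals `maxnorm`; slices
// already within the bound are left unchanged. A common use is capping the
// L2 norm of embedding rows, assuming an existing 2-D tensor `w`:
//
//   at::Tensor capped = at::renorm(w, /*p=*/2, /*dim=*/0, /*maxnorm=*/1.0);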
11315
11316// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
11317inline at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
11318 return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
11319}
11320namespace symint {
11321 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
11322 at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
11323 return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
11324 }
11325}
11326
11327// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
11328inline at::Tensor unfold_backward_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
11329 return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
11330}
11331namespace symint {
11332 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
11333 at::Tensor unfold_backward(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
11334 return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
11335 }
11336}
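
// Note (added for clarity): the `at::symint::` templates above let generic
// code select the concrete (`int64_t`) or symbolic-shape (`c10::SymInt`)
// overload via a template argument instead of distinct names. A sketch,
// assuming `grad`, `sizes`, and `sym_sizes` already exist:
//
//   auto g  = at::symint::unfold_backward<int64_t>(grad, sizes, dim, size, step);
//   auto gs = at::symint::unfold_backward<c10::SymInt>(grad, sym_sizes, dim, size, step);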
11337
11338// aten::equal(Tensor self, Tensor other) -> bool
11339inline bool equal(const at::Tensor & self, const at::Tensor & other) {
11340 return at::_ops::equal::call(self, other);
11341}
11342
11343// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11344inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) {
11345 return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out);
11346}
11347// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11348inline at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
11349 return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out);
11350}
11351
11352// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
11353inline at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) {
11354 return at::_ops::pow_Tensor_Tensor::call(self, exponent);
11355}
11356
11357// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11358inline at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) {
11359 return at::_ops::pow_Scalar_out::call(self, exponent, out);
11360}
11361// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11362inline at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
11363 return at::_ops::pow_Scalar_out::call(self, exponent, out);
11364}
11365
11366// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
11367inline at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent) {
11368 return at::_ops::pow_Scalar::call(self, exponent);
11369}
11370
11371// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
11372inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
11373 return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out);
11374}
11375// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
11376inline at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
11377 return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out);
11378}
11379
11380// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
11381inline at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) {
11382 return at::_ops::pow_Tensor_Scalar::call(self, exponent);
11383}
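
// Example (illustrative): the three `pow` overloads cover tensor^tensor
// (with broadcasting), scalar^tensor, and tensor^scalar. Assuming existing
// tensors `t` and `e`:
//
//   at::Tensor a = at::pow(t, e);    // elementwise t[i] ** e[i]
//   at::Tensor b = at::pow(2, t);    // 2 ** t[i]
//   at::Tensor c = at::pow(t, 3);    // t[i] ** 3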
11384
11385// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11386inline at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) {
11387 return at::_ops::float_power_Tensor_Tensor_out::call(self, exponent, out);
11388}
11389// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11390inline at::Tensor & float_power_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
11391 return at::_ops::float_power_Tensor_Tensor_out::call(self, exponent, out);
11392}
11393
11394// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
11395inline at::Tensor float_power(const at::Tensor & self, const at::Tensor & exponent) {
11396 return at::_ops::float_power_Tensor_Tensor::call(self, exponent);
11397}
11398
11399// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11400inline at::Tensor & float_power_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) {
11401 return at::_ops::float_power_Scalar_out::call(self, exponent, out);
11402}
11403// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
11404inline at::Tensor & float_power_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
11405 return at::_ops::float_power_Scalar_out::call(self, exponent, out);
11406}
11407
11408// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
11409inline at::Tensor float_power(const at::Scalar & self, const at::Tensor & exponent) {
11410 return at::_ops::float_power_Scalar::call(self, exponent);
11411}
11412
11413// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
11414inline at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) {
11415 return at::_ops::float_power_Tensor_Scalar_out::call(self, exponent, out);
11416}
11417// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
11418inline at::Tensor & float_power_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
11419 return at::_ops::float_power_Tensor_Scalar_out::call(self, exponent, out);
11420}
11421
11422// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
11423inline at::Tensor float_power(const at::Tensor & self, const at::Scalar & exponent) {
11424 return at::_ops::float_power_Tensor_Scalar::call(self, exponent);
11425}
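
// Example (illustrative): `float_power` mirrors `pow` but always computes in
// double precision (or complex double for complex inputs), so the result is
// kDouble even when the inputs are kFloat or integral. Assuming a tensor `t`:
//
//   at::Tensor d = at::float_power(t, 2);   // result dtype is at::kDouble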
11426
11427// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor
11428inline at::Tensor normal_functional(const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) {
11429 return at::_ops::normal_functional::call(self, mean, std, generator);
11430}
11431
11432// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11433inline at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt) {
11434 return at::_ops::normal_Tensor_float_out::call(mean, std, generator, out);
11435}
11436// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11437inline at::Tensor & normal_outf(const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
11438 return at::_ops::normal_Tensor_float_out::call(mean, std, generator, out);
11439}
11440
11441// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
11442inline at::Tensor normal(const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt) {
11443 return at::_ops::normal_Tensor_float::call(mean, std, generator);
11444}
11445
11446// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11447inline at::Tensor & normal_out(at::Tensor & out, double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) {
11448 return at::_ops::normal_float_Tensor_out::call(mean, std, generator, out);
11449}
11450// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11451inline at::Tensor & normal_outf(double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
11452 return at::_ops::normal_float_Tensor_out::call(mean, std, generator, out);
11453}
11454
11455// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
11456inline at::Tensor normal(double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) {
11457 return at::_ops::normal_float_Tensor::call(mean, std, generator);
11458}
11459
11460// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11461inline at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) {
11462 return at::_ops::normal_Tensor_Tensor_out::call(mean, std, generator, out);
11463}
11464// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11465inline at::Tensor & normal_outf(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) {
11466 return at::_ops::normal_Tensor_Tensor_out::call(mean, std, generator, out);
11467}
11468
11469// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
11470inline at::Tensor normal(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) {
11471 return at::_ops::normal_Tensor_Tensor::call(mean, std, generator);
11472}
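
// Example (illustrative): the three tensor overloads of `normal` draw one
// sample per element, taking the mean and/or standard deviation elementwise.
// Assuming existing floating-point tensors `mean_t` and `std_t`:
//
//   at::Tensor s1 = at::normal(mean_t, /*std=*/1.0);   // per-element means
//   at::Tensor s2 = at::normal(/*mean=*/0.0, std_t);   // per-element stds
//   at::Tensor s3 = at::normal(mean_t, std_t);         // both elementwise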
11473
11474// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
11475inline at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}) {
11476 return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
11477}
11478namespace symint {
11479 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
11480 at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}) {
11481 return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
11482 }
11483}
11484
11485// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
11486inline at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
11487 return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
11488}
11489namespace symint {
11490 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
11491 at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
11492 return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory);
11493 }
11494}
11495
11496// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
11497inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}) {
11498 return at::_ops::normal_float_float::call(mean, std, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
11499}
11500namespace symint {
11501 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
11502 at::Tensor normal(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}) {
11503 return at::_ops::normal_float_float::call(mean, std, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
11504 }
11505}
11506
11507// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
11508inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
11509 return at::_ops::normal_float_float::call(mean, std, size, generator, dtype, layout, device, pin_memory);
11510}
11511namespace symint {
11512 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
11513 at::Tensor normal(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
11514 return at::_ops::normal_float_float::call(mean, std, size, generator, dtype, layout, device, pin_memory);
11515 }
11516}
11517
11518// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11519inline at::Tensor & normal_out(at::Tensor & out, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt) {
11520 return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out);
11521}
11522namespace symint {
11523 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
11524 at::Tensor & normal_out(at::Tensor & out, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt) {
11525 return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out);
11526 }
11527}
11528
11529// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11530inline at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
11531 return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out);
11532}
11533namespace symint {
11534 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
11535 at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
11536 return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out);
11537 }
11538}
11539
11540// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11541inline at::Tensor & normal_symint_out(at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt) {
11542 return at::_ops::normal_float_float_out::call(mean, std, size, generator, out);
11543}
11544namespace symint {
11545 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
11546 at::Tensor & normal_out(at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt) {
11547 return at::_ops::normal_float_float_out::call(mean, std, size, generator, out);
11548 }
11549}
11550
11551// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
11552inline at::Tensor & normal_symint_outf(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
11553 return at::_ops::normal_float_float_out::call(mean, std, size, generator, out);
11554}
11555namespace symint {
11556 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
11557 at::Tensor & normal_outf(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
11558 return at::_ops::normal_float_float_out::call(mean, std, size, generator, out);
11559 }
11560}
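
// Example (illustrative): the `float_float` overload is a factory: it
// allocates a fresh tensor of the given size and fills it with samples from
// N(mean, std^2), honoring the usual TensorOptions:
//
//   at::Tensor z = at::normal(/*mean=*/0.0, /*std=*/1.0, {3, 4}, c10::nullopt,
//                             at::TensorOptions().dtype(at::kFloat));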
11561
11562// aten::alias(Tensor(a) self) -> Tensor(a)
11563inline at::Tensor alias(const at::Tensor & self) {
11564 return at::_ops::alias::call(self);
11565}
11566
11567// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
11568inline void _amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
11569 return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
11570}
11571
11572// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
11573inline at::Tensor & _amp_update_scale_(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
11574 return at::_ops::_amp_update_scale_::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
11575}
11576
11577// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11578inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, const at::Scalar & scalar) {
11579 return at::_ops::_foreach_add_Scalar::call(self, scalar);
11580}
11581
11582// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11583inline void _foreach_add_(at::TensorList self, const at::Scalar & scalar) {
11584 return at::_ops::_foreach_add__Scalar::call(self, scalar);
11585}
11586
11587// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11588inline ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, const at::Scalar & scalar) {
11589 return at::_ops::_foreach_sub_Scalar::call(self, scalar);
11590}
11591
11592// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11593inline void _foreach_sub_(at::TensorList self, const at::Scalar & scalar) {
11594 return at::_ops::_foreach_sub__Scalar::call(self, scalar);
11595}
11596
11597// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11598inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, const at::Scalar & scalar) {
11599 return at::_ops::_foreach_mul_Scalar::call(self, scalar);
11600}
11601
11602// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11603inline void _foreach_mul_(at::TensorList self, const at::Scalar & scalar) {
11604 return at::_ops::_foreach_mul__Scalar::call(self, scalar);
11605}
11606
11607// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11608inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Scalar & scalar) {
11609 return at::_ops::_foreach_div_Scalar::call(self, scalar);
11610}
11611
11612// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11613inline void _foreach_div_(at::TensorList self, const at::Scalar & scalar) {
11614 return at::_ops::_foreach_div__Scalar::call(self, scalar);
11615}
11616
11617// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11618inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, const at::Scalar & scalar) {
11619 return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
11620}
11621
11622// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11623inline void _foreach_clamp_min_(at::TensorList self, const at::Scalar & scalar) {
11624 return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
11625}
11626
11627// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11628inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar) {
11629 return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
11630}
11631
11632// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11633inline void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar) {
11634 return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
11635}
11636
11637// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11638inline ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, const at::Scalar & scalar) {
11639 return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
11640}
11641
11642// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11643inline void _foreach_maximum_(at::TensorList self, const at::Scalar & scalar) {
11644 return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
11645}
11646
11647// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
11648inline ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, const at::Scalar & scalar) {
11649 return at::_ops::_foreach_minimum_Scalar::call(self, scalar);
11650}
11651
11652// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
11653inline void _foreach_minimum_(at::TensorList self, const at::Scalar & scalar) {
11654 return at::_ops::_foreach_minimum__Scalar::call(self, scalar);
11655}
11656
11657// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
11658inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
11659 return at::_ops::_foreach_add_List::call(self, other, alpha);
11660}
11661
11662// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
11663inline void _foreach_add_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
11664 return at::_ops::_foreach_add__List::call(self, other, alpha);
11665}
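
// Example (illustrative): the `_foreach_` family applies one op across a
// whole list of tensors, typically fusing the loop into fewer kernel
// launches on CUDA. A plain SGD step over matching lists `params` and
// `grads` (a std::vector<at::Tensor> converts to at::TensorList), assuming a
// learning rate `lr`:
//
//   at::_foreach_add_(params, grads, /*alpha=*/-lr);   // params[i] -= lr * grads[i]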
11666
11667// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
11668inline ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
11669 return at::_ops::_foreach_sub_List::call(self, other, alpha);
11670}
11671
11672// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
11673inline void _foreach_sub_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
11674 return at::_ops::_foreach_sub__List::call(self, other, alpha);
11675}
11676
11677// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
11678inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::TensorList other) {
11679 return at::_ops::_foreach_mul_List::call(self, other);
11680}
11681
11682// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
11683inline void _foreach_mul_(at::TensorList self, at::TensorList other) {
11684 return at::_ops::_foreach_mul__List::call(self, other);
11685}
11686
11687// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
11688inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::TensorList other) {
11689 return at::_ops::_foreach_div_List::call(self, other);
11690}
11691
11692// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
11693inline void _foreach_div_(at::TensorList self, at::TensorList other) {
11694 return at::_ops::_foreach_div__List::call(self, other);
11695}
11696
11697// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
11698inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::TensorList other) {
11699 return at::_ops::_foreach_clamp_min_List::call(self, other);
11700}
11701
11702// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
11703inline void _foreach_clamp_min_(at::TensorList self, at::TensorList other) {
11704 return at::_ops::_foreach_clamp_min__List::call(self, other);
11705}
11706
11707// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
11708inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::TensorList other) {
11709 return at::_ops::_foreach_clamp_max_List::call(self, other);
11710}
11711
11712// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
11713inline void _foreach_clamp_max_(at::TensorList self, at::TensorList other) {
11714 return at::_ops::_foreach_clamp_max__List::call(self, other);
11715}
11716
11717// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
11718inline ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::TensorList other) {
11719 return at::_ops::_foreach_maximum_List::call(self, other);
11720}
11721
11722// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
11723inline void _foreach_maximum_(at::TensorList self, at::TensorList other) {
11724 return at::_ops::_foreach_maximum__List::call(self, other);
11725}
11726
11727// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
11728inline ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::TensorList other) {
11729 return at::_ops::_foreach_minimum_List::call(self, other);
11730}
11731
11732// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
11733inline void _foreach_minimum_(at::TensorList self, at::TensorList other) {
11734 return at::_ops::_foreach_minimum__List::call(self, other);
11735}
11736
11737// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11738inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11739 return at::_ops::_foreach_add_ScalarList::call(self, scalars);
11740}
11741
11742// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11743inline void _foreach_add_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11744 return at::_ops::_foreach_add__ScalarList::call(self, scalars);
11745}
11746
11747// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11748inline ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11749 return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
11750}
11751
11752// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11753inline void _foreach_sub_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11754 return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
11755}
11756
11757// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11758inline ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11759 return at::_ops::_foreach_div_ScalarList::call(self, scalars);
11760}
11761
11762// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11763inline void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11764 return at::_ops::_foreach_div__ScalarList::call(self, scalars);
11765}
11766
11767// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11768inline ::std::vector<at::Tensor> _foreach_mul(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11769 return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
11770}
11771
11772// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11773inline void _foreach_mul_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11774 return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
11775}
11776
11777// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11778inline ::std::vector<at::Tensor> _foreach_clamp_min(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11779 return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
11780}
11781
11782// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11783inline void _foreach_clamp_min_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11784 return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
11785}
11786
11787// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11788inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11789 return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
11790}
11791
11792// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11793inline void _foreach_clamp_max_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11794 return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
11795}
11796
11797// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11798inline ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11799 return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
11800}
11801
11802// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11803inline void _foreach_maximum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11804 return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
11805}
11806
11807// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
11808inline ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11809 return at::_ops::_foreach_minimum_ScalarList::call(self, scalars);
11810}
11811
11812// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
11813inline void _foreach_minimum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
11814 return at::_ops::_foreach_minimum__ScalarList::call(self, scalars);
11815}
11816
11817// aten::_foreach_exp(Tensor[] self) -> Tensor[]
11818inline ::std::vector<at::Tensor> _foreach_exp(at::TensorList self) {
11819 return at::_ops::_foreach_exp::call(self);
11820}
11821
11822// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
11823inline void _foreach_zero_(at::TensorList self) {
11824 return at::_ops::_foreach_zero_::call(self);
11825}
11826
11827// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
11828inline void _foreach_exp_(at::TensorList self) {
11829 return at::_ops::_foreach_exp_::call(self);
11830}
11831
11832// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
11833inline ::std::vector<at::Tensor> _foreach_sqrt(at::TensorList self) {
11834 return at::_ops::_foreach_sqrt::call(self);
11835}
11836
11837// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
11838inline void _foreach_sqrt_(at::TensorList self) {
11839 return at::_ops::_foreach_sqrt_::call(self);
11840}
11841
11842// aten::_foreach_abs(Tensor[] self) -> Tensor[]
11843inline ::std::vector<at::Tensor> _foreach_abs(at::TensorList self) {
11844 return at::_ops::_foreach_abs::call(self);
11845}
11846
11847// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
11848inline void _foreach_abs_(at::TensorList self) {
11849 return at::_ops::_foreach_abs_::call(self);
11850}
11851
11852// aten::_foreach_acos(Tensor[] self) -> Tensor[]
11853inline ::std::vector<at::Tensor> _foreach_acos(at::TensorList self) {
11854 return at::_ops::_foreach_acos::call(self);
11855}
11856
11857// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
11858inline void _foreach_acos_(at::TensorList self) {
11859 return at::_ops::_foreach_acos_::call(self);
11860}
11861
11862// aten::_foreach_asin(Tensor[] self) -> Tensor[]
11863inline ::std::vector<at::Tensor> _foreach_asin(at::TensorList self) {
11864 return at::_ops::_foreach_asin::call(self);
11865}
11866
11867// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
11868inline void _foreach_asin_(at::TensorList self) {
11869 return at::_ops::_foreach_asin_::call(self);
11870}
11871
11872// aten::_foreach_atan(Tensor[] self) -> Tensor[]
11873inline ::std::vector<at::Tensor> _foreach_atan(at::TensorList self) {
11874 return at::_ops::_foreach_atan::call(self);
11875}
11876
11877// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
11878inline void _foreach_atan_(at::TensorList self) {
11879 return at::_ops::_foreach_atan_::call(self);
11880}
11881
11882// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
11883inline ::std::vector<at::Tensor> _foreach_ceil(at::TensorList self) {
11884 return at::_ops::_foreach_ceil::call(self);
11885}
11886
11887// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
11888inline void _foreach_ceil_(at::TensorList self) {
11889 return at::_ops::_foreach_ceil_::call(self);
11890}
11891
11892// aten::_foreach_cos(Tensor[] self) -> Tensor[]
11893inline ::std::vector<at::Tensor> _foreach_cos(at::TensorList self) {
11894 return at::_ops::_foreach_cos::call(self);
11895}
11896
11897// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
11898inline void _foreach_cos_(at::TensorList self) {
11899 return at::_ops::_foreach_cos_::call(self);
11900}
11901
11902// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
11903inline ::std::vector<at::Tensor> _foreach_cosh(at::TensorList self) {
11904 return at::_ops::_foreach_cosh::call(self);
11905}
11906
11907// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
11908inline void _foreach_cosh_(at::TensorList self) {
11909 return at::_ops::_foreach_cosh_::call(self);
11910}
11911
11912// aten::_foreach_erf(Tensor[] self) -> Tensor[]
11913inline ::std::vector<at::Tensor> _foreach_erf(at::TensorList self) {
11914 return at::_ops::_foreach_erf::call(self);
11915}
11916
11917// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
11918inline void _foreach_erf_(at::TensorList self) {
11919 return at::_ops::_foreach_erf_::call(self);
11920}
11921
11922// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
11923inline ::std::vector<at::Tensor> _foreach_erfc(at::TensorList self) {
11924 return at::_ops::_foreach_erfc::call(self);
11925}
11926
11927// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
11928inline void _foreach_erfc_(at::TensorList self) {
11929 return at::_ops::_foreach_erfc_::call(self);
11930}
11931
11932// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
11933inline ::std::vector<at::Tensor> _foreach_expm1(at::TensorList self) {
11934 return at::_ops::_foreach_expm1::call(self);
11935}
11936
11937// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
11938inline void _foreach_expm1_(at::TensorList self) {
11939 return at::_ops::_foreach_expm1_::call(self);
11940}
11941
11942// aten::_foreach_floor(Tensor[] self) -> Tensor[]
11943inline ::std::vector<at::Tensor> _foreach_floor(at::TensorList self) {
11944 return at::_ops::_foreach_floor::call(self);
11945}
11946
11947// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
11948inline void _foreach_floor_(at::TensorList self) {
11949 return at::_ops::_foreach_floor_::call(self);
11950}
11951
11952// aten::_foreach_log(Tensor[] self) -> Tensor[]
11953inline ::std::vector<at::Tensor> _foreach_log(at::TensorList self) {
11954 return at::_ops::_foreach_log::call(self);
11955}
11956
11957// aten::_foreach_log_(Tensor(a!)[] self) -> ()
11958inline void _foreach_log_(at::TensorList self) {
11959 return at::_ops::_foreach_log_::call(self);
11960}
11961
11962// aten::_foreach_log10(Tensor[] self) -> Tensor[]
11963inline ::std::vector<at::Tensor> _foreach_log10(at::TensorList self) {
11964 return at::_ops::_foreach_log10::call(self);
11965}
11966
11967// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
11968inline void _foreach_log10_(at::TensorList self) {
11969 return at::_ops::_foreach_log10_::call(self);
11970}
11971
11972// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
11973inline ::std::vector<at::Tensor> _foreach_log1p(at::TensorList self) {
11974 return at::_ops::_foreach_log1p::call(self);
11975}
11976
11977// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
11978inline void _foreach_log1p_(at::TensorList self) {
11979 return at::_ops::_foreach_log1p_::call(self);
11980}
11981
11982// aten::_foreach_log2(Tensor[] self) -> Tensor[]
11983inline ::std::vector<at::Tensor> _foreach_log2(at::TensorList self) {
11984 return at::_ops::_foreach_log2::call(self);
11985}
11986
11987// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
11988inline void _foreach_log2_(at::TensorList self) {
11989 return at::_ops::_foreach_log2_::call(self);
11990}
11991
11992// aten::_foreach_neg(Tensor[] self) -> Tensor[]
11993inline ::std::vector<at::Tensor> _foreach_neg(at::TensorList self) {
11994 return at::_ops::_foreach_neg::call(self);
11995}
11996
11997// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
11998inline void _foreach_neg_(at::TensorList self) {
11999 return at::_ops::_foreach_neg_::call(self);
12000}
12001
12002// aten::_foreach_tan(Tensor[] self) -> Tensor[]
12003inline ::std::vector<at::Tensor> _foreach_tan(at::TensorList self) {
12004 return at::_ops::_foreach_tan::call(self);
12005}
12006
12007// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
12008inline void _foreach_tan_(at::TensorList self) {
12009 return at::_ops::_foreach_tan_::call(self);
12010}
12011
12012// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
12013inline ::std::vector<at::Tensor> _foreach_tanh(at::TensorList self) {
12014 return at::_ops::_foreach_tanh::call(self);
12015}
12016
12017// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
12018inline void _foreach_tanh_(at::TensorList self) {
12019 return at::_ops::_foreach_tanh_::call(self);
12020}
12021
12022// aten::_foreach_sin(Tensor[] self) -> Tensor[]
12023inline ::std::vector<at::Tensor> _foreach_sin(at::TensorList self) {
12024 return at::_ops::_foreach_sin::call(self);
12025}
12026
12027// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
12028inline void _foreach_sin_(at::TensorList self) {
12029 return at::_ops::_foreach_sin_::call(self);
12030}
12031
12032// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
12033inline ::std::vector<at::Tensor> _foreach_sinh(at::TensorList self) {
12034 return at::_ops::_foreach_sinh::call(self);
12035}
12036
12037// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
12038inline void _foreach_sinh_(at::TensorList self) {
12039 return at::_ops::_foreach_sinh_::call(self);
12040}
12041
12042// aten::_foreach_round(Tensor[] self) -> Tensor[]
12043inline ::std::vector<at::Tensor> _foreach_round(at::TensorList self) {
12044 return at::_ops::_foreach_round::call(self);
12045}
12046
12047// aten::_foreach_round_(Tensor(a!)[] self) -> ()
12048inline void _foreach_round_(at::TensorList self) {
12049 return at::_ops::_foreach_round_::call(self);
12050}
12051
12052// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
12053inline ::std::vector<at::Tensor> _foreach_lgamma(at::TensorList self) {
12054 return at::_ops::_foreach_lgamma::call(self);
12055}
12056
12057// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
12058inline void _foreach_lgamma_(at::TensorList self) {
12059 return at::_ops::_foreach_lgamma_::call(self);
12060}
12061
12062// aten::_foreach_frac(Tensor[] self) -> Tensor[]
12063inline ::std::vector<at::Tensor> _foreach_frac(at::TensorList self) {
12064 return at::_ops::_foreach_frac::call(self);
12065}
12066
12067// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
12068inline void _foreach_frac_(at::TensorList self) {
12069 return at::_ops::_foreach_frac_::call(self);
12070}
12071
12072// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
12073inline ::std::vector<at::Tensor> _foreach_reciprocal(at::TensorList self) {
12074 return at::_ops::_foreach_reciprocal::call(self);
12075}
12076
12077// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
12078inline void _foreach_reciprocal_(at::TensorList self) {
12079 return at::_ops::_foreach_reciprocal_::call(self);
12080}
12081
12082// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
12083inline ::std::vector<at::Tensor> _foreach_sigmoid(at::TensorList self) {
12084 return at::_ops::_foreach_sigmoid::call(self);
12085}
12086
12087// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
12088inline void _foreach_sigmoid_(at::TensorList self) {
12089 return at::_ops::_foreach_sigmoid_::call(self);
12090}
12091
12092// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
12093inline ::std::vector<at::Tensor> _foreach_trunc(at::TensorList self) {
12094 return at::_ops::_foreach_trunc::call(self);
12095}
12096
12097// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
12098inline void _foreach_trunc_(at::TensorList self) {
12099 return at::_ops::_foreach_trunc_::call(self);
12100}
12101
12102// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
12103inline void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
12104 return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value);
12105}
12106
12107// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
12108inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
12109 return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
12110}
12111
12112// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
12113inline void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
12114 return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars);
12115}
12116
12117// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
12118inline void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
12119 return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars);
12120}
12121
12122// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
12123inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
12124 return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
12125}
12126
12127// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
12128inline void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
12129 return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
12130}
12131
12132// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
12133inline ::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
12134 return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value);
12135}
12136
12137// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
12138inline ::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
12139 return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
12140}
12141
12142// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
12143inline ::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
12144 return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars);
12145}
12146
12147// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
12148inline ::std::vector<at::Tensor> _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
12149 return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars);
12150}
12151
12152// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
12153inline ::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
12154 return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
12155}
12156
12157// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
12158inline ::std::vector<at::Tensor> _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
12159 return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
12160}
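
// Example (illustrative): `_foreach_addcdiv_` performs, for each i,
// self[i] += value * tensor1[i] / tensor2[i], which is the shape of an
// Adam-style parameter update. Assuming matching lists `params`,
// `exp_avgs`, and `denoms`, and a step size `step_size`:
//
//   at::_foreach_addcdiv_(params, exp_avgs, denoms, /*value=*/-step_size);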
12161
12162// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
12163inline ::std::vector<at::Tensor> _foreach_norm(at::TensorList self, const at::Scalar & ord=2) {
12164 return at::_ops::_foreach_norm_Scalar::call(self, ord);
12165}
12166
12167// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
12168inline ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
12169 return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
12170}
12171
12172// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
12173inline void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
12174 return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
12175}
12176
12177// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
12178inline ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
12179 return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
12180}
12181
12182// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
12183inline void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
12184 return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
12185}
12186
12187// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
12188inline at::Tensor bucketize(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
12189 return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right);
12190}
12191
12192// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
12193inline at::Tensor & bucketize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
12194 return at::_ops::bucketize_Tensor_out::call(self, boundaries, out_int32, right, out);
12195}
12196// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
12197inline at::Tensor & bucketize_outf(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
12198 return at::_ops::bucketize_Tensor_out::call(self, boundaries, out_int32, right, out);
12199}
12200
12201// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
12202inline at::Tensor bucketize(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
12203 return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right);
12204}
12205
12206// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
12207inline at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
12208 return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
12209}
12210
12211// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
12212inline at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
12213 return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
12214}
12215// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
12216inline at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
12217 return at::_ops::searchsorted_Tensor_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
12218}
12219
12220// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
12221inline at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
12222 return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
12223}
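
// Example (illustrative): `bucketize` maps each element of `self` to the
// index of the bucket delimited by a single sorted 1-D `boundaries` tensor,
// while `searchsorted` additionally supports batched sorted sequences; note
// the reversed argument order between the two. Assuming a sorted `bounds`
// tensor and values `v`:
//
//   at::Tensor b = at::bucketize(v, bounds);
//   at::Tensor s = at::searchsorted(bounds, v, /*out_int32=*/false, /*right=*/true);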
12224
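// searchsorted is bucketize with the operands swapped (the sorted sequence
// comes first) plus two extras: `side` ("left"/"right") as an alternative
// spelling of `right`, and `sorter`, an index tensor that lets an unsorted
// sequence be used as if sorted. A minimal sketch:
//
//   at::Tensor seq = at::tensor({1.0, 3.0, 5.0, 7.0});
//   at::Tensor vals = at::tensor({3.0, 4.0});
//   at::Tensor left = at::searchsorted(seq, vals);               // [1, 2]
//   at::Tensor right = at::searchsorted(seq, vals, false, true); // [2, 2]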
12225// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
12226inline at::Tensor _convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32=false) {
12227 return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32);
12228}
12229
12230// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
12231inline at::Tensor & _convert_indices_from_coo_to_csr_out(at::Tensor & out, const at::Tensor & self, int64_t size, bool out_int32=false) {
12232 return at::_ops::_convert_indices_from_coo_to_csr_out::call(self, size, out_int32, out);
12233}
12234// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
12235inline at::Tensor & _convert_indices_from_coo_to_csr_outf(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
12236 return at::_ops::_convert_indices_from_coo_to_csr_out::call(self, size, out_int32, out);
12237}
12238
12239// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
12240inline at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) {
12241 return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose);
12242}
12243
12244// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
12245inline at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) {
12246 return at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices, col_indices, out_int32, transpose, out);
12247}
12248// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
12249inline at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
12250 return at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices, col_indices, out_int32, transpose, out);
12251}
12252
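// These underscore-prefixed `_convert_indices_*` helpers are internal sparse
// utilities: the first compresses a sorted COO row-index vector of length nnz
// into a CSR `crow_indices` vector of length size+1, and the second expands
// CSR (crow_indices, col_indices) back into COO indices. A rough sketch of
// the forward direction:
//
//   at::Tensor rows = at::tensor({0, 0, 1, 3}, at::kLong);  // sorted COO rows
//   at::Tensor crow = at::_convert_indices_from_coo_to_csr(rows, /*size=*/4);
//   // crow == [0, 2, 3, 3, 4]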
12253// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12254inline at::Tensor & mse_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
12255 return at::_ops::mse_loss_out::call(self, target, reduction, out);
12256}
12257// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12258inline at::Tensor & mse_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
12259 return at::_ops::mse_loss_out::call(self, target, reduction, out);
12260}
12261
12262// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
12263inline at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
12264 return at::_ops::mse_loss::call(self, target, reduction);
12265}
12266
12267// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
12268inline at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
12269 return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input);
12270}
12271// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
12272inline at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
12273 return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input);
12274}
12275
12276// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
12277inline at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
12278 return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
12279}
12280
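// mse_loss computes (self - target)^2 reduced according to `reduction`
// (at::Reduction::None, Mean, or Sum; Mean is the default and yields a
// 0-dim tensor). Illustrative sketch:
//
//   at::Tensor pred = at::tensor({1.0, 2.0});
//   at::Tensor tgt = at::tensor({0.0, 4.0});
//   at::Tensor loss = at::mse_loss(pred, tgt);  // (1 + 4) / 2 = 2.5
//
// mse_loss_backward maps grad_output back to d(loss)/d(self); it is normally
// driven by autograd rather than called directly.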
12281// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
12282inline at::Tensor l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
12283 return at::_ops::l1_loss::call(self, target, reduction);
12284}
12285
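// l1_loss is the mean-absolute-error counterpart; it has no out= or backward
// entry points in this header because it is implemented as a composite of
// other ops. With the tensors above, at::l1_loss(pred, tgt) gives
// (1 + 2) / 2 = 1.5.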
12286// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12287inline at::Tensor & multi_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
12288 return at::_ops::multi_margin_loss_out::call(self, target, p, margin, weight, reduction, out);
12289}
12290// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12291inline at::Tensor & multi_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
12292 return at::_ops::multi_margin_loss_out::call(self, target, p, margin, weight, reduction, out);
12293}
12294
12295// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
12296inline at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
12297 return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
12298}
12299
12300// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
12301inline at::Tensor & multi_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
12302 return at::_ops::multi_margin_loss_backward_grad_input::call(grad_output, self, target, p, margin, weight, reduction, grad_input);
12303}
12304// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
12305inline at::Tensor & multi_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
12306 return at::_ops::multi_margin_loss_backward_grad_input::call(grad_output, self, target, p, margin, weight, reduction, grad_input);
12307}
12308
12309// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
12310inline at::Tensor multi_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
12311 return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction);
12312}
12313
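// multi_margin_loss is a multi-class hinge loss: for each sample it averages
// max(0, margin - input[target] + input[j])^p over the classes j != target,
// optionally rescaled per class by `weight`. A minimal sketch (illustrative
// values):
//
//   at::Tensor input = at::tensor({0.1, 0.2, 0.7}).unsqueeze(0);  // [1, 3]
//   at::Tensor target = at::tensor({2}, at::kLong);
//   at::Tensor loss = at::multi_margin_loss(input, target);
//   // ((1 - 0.7 + 0.1) + (1 - 0.7 + 0.2)) / 3 = 0.3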
12314// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12315inline at::Tensor & multilabel_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
12316 return at::_ops::multilabel_margin_loss_out::call(self, target, reduction, out);
12317}
12318// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12319inline at::Tensor & multilabel_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
12320 return at::_ops::multilabel_margin_loss_out::call(self, target, reduction, out);
12321}
12322
12323// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
12324inline at::Tensor multilabel_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
12325 return at::_ops::multilabel_margin_loss::call(self, target, reduction);
12326}
12327
12328// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
12329inline ::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_out(at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
12330 return at::_ops::multilabel_margin_loss_forward_output::call(self, target, reduction, output, is_target);
12331}
12332// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
12333inline ::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
12334 return at::_ops::multilabel_margin_loss_forward_output::call(self, target, reduction, output, is_target);
12335}
12336
12337// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
12338inline ::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
12339 return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction);
12340}
12341
12342// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
12343inline at::Tensor & multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
12344 return at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, is_target, grad_input);
12345}
12346// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
12347inline at::Tensor & multilabel_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {
12348 return at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, is_target, grad_input);
12349}
12350
12351// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
12352inline at::Tensor multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
12353 return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target);
12354}
12355
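// The _forward/_backward pair exposes the internal kernels:
// multilabel_margin_loss_forward also emits an `is_target` mask marking which
// classes are labels for each sample, and the backward kernel consumes that
// mask instead of recomputing it. Target rows hold class indices padded with
// -1; entries after the first -1 are ignored. E.g. (with suitable `input`
// and `target` tensors):
//
//   auto [loss, is_target] = at::multilabel_margin_loss_forward(
//       input, target, at::Reduction::Mean);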
12356// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12357inline at::Tensor & nll_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12358 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12359}
12360namespace symint {
12361 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12362 at::Tensor & nll_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12363 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12364 }
12365}
12366
12367// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12368inline at::Tensor & nll_loss_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
12369 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12370}
12371namespace symint {
12372 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12373 at::Tensor & nll_loss_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
12374 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12375 }
12376}
12377
12378// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12379inline at::Tensor & nll_loss_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12380 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12381}
12382namespace symint {
12383 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12384 at::Tensor & nll_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12385 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12386 }
12387}
12388
12389// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12390inline at::Tensor & nll_loss_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
12391 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12392}
12393namespace symint {
12394 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12395 at::Tensor & nll_loss_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
12396 return at::_ops::nll_loss_out::call(self, target, weight, reduction, ignore_index, out);
12397 }
12398}
12399
12400// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
12401inline at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12402 return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
12403}
12404namespace symint {
12405 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12406 at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12407 return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
12408 }
12409}
12410
12411// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
12412inline at::Tensor nll_loss_nd_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12413 return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
12414}
12415namespace symint {
12416 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12417 at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12418 return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
12419 }
12420}
12421
12422// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
12423inline at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12424 return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
12425}
12426namespace symint {
12427 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12428 at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12429 return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
12430 }
12431}
12432
12433// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
12434inline at::Tensor nll_loss_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12435 return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
12436}
12437namespace symint {
12438 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12439 at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12440 return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
12441 }
12442}
12443
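// nll_loss expects log-probabilities (typically log_softmax output) of shape
// [N, C] and class-index targets of shape [N]. Sketch:
//
//   at::Tensor logp = at::log_softmax(at::randn({4, 10}), /*dim=*/1);
//   at::Tensor target = at::randint(10, {4});
//   at::Tensor loss = at::nll_loss(logp, target);
//
// The `_symint` free functions and the templated `at::symint::` wrappers are
// the same op with `ignore_index` typed as c10::SymInt so symbolic shapes can
// flow through tracing; at::symint::nll_loss<int64_t>(...) and
// at::symint::nll_loss<c10::SymInt>(...) select between the two via the
// enable_if constraint on T.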
12444// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12445inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12446 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12447}
12448namespace symint {
12449 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12450 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12451 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12452 }
12453}
12454
12455// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12456inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12457 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12458}
12459namespace symint {
12460 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12461 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12462 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12463 }
12464}
12465
12466// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12467inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12468 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12469}
12470namespace symint {
12471 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12472 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12473 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12474 }
12475}
12476
12477// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12478inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12479 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12480}
12481namespace symint {
12482 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12483 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12484 return at::_ops::nll_loss_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12485 }
12486}
12487
12488// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
12489inline ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12490 return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
12491}
12492namespace symint {
12493 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12494 ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12495 return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
12496 }
12497}
12498
12499// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
12500inline ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12501 return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
12502}
12503namespace symint {
12504 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12505 ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12506 return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
12507 }
12508}
12509
12510// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12511inline at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12512 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12513}
12514namespace symint {
12515 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12516 at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12517 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12518 }
12519}
12520
12521// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12522inline at::Tensor & nll_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12523 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12524}
12525namespace symint {
12526 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12527 at::Tensor & nll_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12528 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12529 }
12530}
12531
12532// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12533inline at::Tensor & nll_loss_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12534 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12535}
12536namespace symint {
12537 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12538 at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12539 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12540 }
12541}
12542
12543// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12544inline at::Tensor & nll_loss_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12545 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12546}
12547namespace symint {
12548 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12549 at::Tensor & nll_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12550 return at::_ops::nll_loss_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12551 }
12552}
12553
12554// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
12555inline at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12556 return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12557}
12558namespace symint {
12559 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12560 at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12561 return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12562 }
12563}
12564
12565// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
12566inline at::Tensor nll_loss_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12567 return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12568}
12569namespace symint {
12570 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12571 at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12572 return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12573 }
12574}
12575
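// nll_loss_forward additionally returns `total_weight`, the summed weight of
// the samples that actually contributed (ignored indices excluded), which is
// the divisor under Mean reduction. nll_loss_backward requires that same
// tensor back, so code driving these kernels manually must thread it through:
//
//   auto [output, total_weight] = at::nll_loss_forward(
//       logp, target, /*weight=*/{}, at::Reduction::Mean, /*ignore_index=*/-100);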
12576// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12577inline at::Tensor & nll_loss2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12578 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12579}
12580namespace symint {
12581 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12582 at::Tensor & nll_loss2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12583 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12584 }
12585}
12586
12587// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12588inline at::Tensor & nll_loss2d_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
12589 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12590}
12591namespace symint {
12592 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12593 at::Tensor & nll_loss2d_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
12594 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12595 }
12596}
12597
12598// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12599inline at::Tensor & nll_loss2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12600 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12601}
12602namespace symint {
12603 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12604 at::Tensor & nll_loss2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12605 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12606 }
12607}
12608
12609// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
12610inline at::Tensor & nll_loss2d_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
12611 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12612}
12613namespace symint {
12614 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12615 at::Tensor & nll_loss2d_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
12616 return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
12617 }
12618}
12619
12620// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
12621inline at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12622 return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
12623}
12624namespace symint {
12625 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12626 at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
12627 return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
12628 }
12629}
12630
12631// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
12632inline at::Tensor nll_loss2d_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12633 return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
12634}
12635namespace symint {
12636 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12637 at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
12638 return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
12639 }
12640}
12641
12642// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12643inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12644 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12645}
12646namespace symint {
12647 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12648 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12649 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12650 }
12651}
12652
12653// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12654inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12655 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12656}
12657namespace symint {
12658 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12659 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12660 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12661 }
12662}
12663
12664// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12665inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12666 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12667}
12668namespace symint {
12669 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12670 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12671 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12672 }
12673}
12674
12675// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
12676inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12677 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12678}
12679namespace symint {
12680 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12681 ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
12682 return at::_ops::nll_loss2d_forward_output::call(self, target, weight, reduction, ignore_index, output, total_weight);
12683 }
12684}
12685
12686// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
12687inline ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12688 return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
12689}
12690namespace symint {
12691 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12692 ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
12693 return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
12694 }
12695}
12696
12697// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
12698inline ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12699 return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
12700}
12701namespace symint {
12702 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12703 ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
12704 return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
12705 }
12706}
12707
12708// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12709inline at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12710 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12711}
12712namespace symint {
12713 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12714 at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12715 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12716 }
12717}
12718
12719// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12720inline at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12721 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12722}
12723namespace symint {
12724 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12725 at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12726 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12727 }
12728}
12729
12730// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12731inline at::Tensor & nll_loss2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12732 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12733}
12734namespace symint {
12735 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12736 at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12737 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12738 }
12739}
12740
12741// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
12742inline at::Tensor & nll_loss2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12743 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12744}
12745namespace symint {
12746 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12747 at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
12748 return at::_ops::nll_loss2d_backward_grad_input::call(grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
12749 }
12750}
12751
12752// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
12753inline at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12754 return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12755}
12756namespace symint {
12757 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
12758 at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
12759 return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12760 }
12761}
12762
12763// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
12764inline at::Tensor nll_loss2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12765 return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12766}
12767namespace symint {
12768 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
12769 at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
12770 return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
12771 }
12772}
12773
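// nll_loss2d is the spatial variant used for dense prediction (e.g. semantic
// segmentation): input is [N, C, H, W] log-probabilities, target is [N, H, W]
// class indices, and the same symint/forward/backward surface is generated
// for it. Sketch:
//
//   at::Tensor logp = at::log_softmax(at::randn({2, 5, 8, 8}), /*dim=*/1);
//   at::Tensor tgt = at::randint(5, {2, 8, 8});
//   at::Tensor loss = at::nll_loss2d(logp, tgt);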
12774// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
12775inline at::Tensor & smooth_l1_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0) {
12776 return at::_ops::smooth_l1_loss_out::call(self, target, reduction, beta, out);
12777}
12778// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
12779inline at::Tensor & smooth_l1_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
12780 return at::_ops::smooth_l1_loss_out::call(self, target, reduction, beta, out);
12781}
12782
12783// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
12784inline at::Tensor smooth_l1_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0) {
12785 return at::_ops::smooth_l1_loss::call(self, target, reduction, beta);
12786}
12787
12788// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
12789inline at::Tensor & smooth_l1_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
12790 return at::_ops::smooth_l1_loss_backward_grad_input::call(grad_output, self, target, reduction, beta, grad_input);
12791}
12792// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
12793inline at::Tensor & smooth_l1_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
12794 return at::_ops::smooth_l1_loss_backward_grad_input::call(grad_output, self, target, reduction, beta, grad_input);
12795}
12796
12797// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
12798inline at::Tensor smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
12799 return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta);
12800}
12801
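// smooth_l1_loss is quadratic near zero and linear beyond: with
// x = self - target, each element contributes 0.5*x^2/beta when |x| < beta
// and |x| - 0.5*beta otherwise (beta=0 reduces to l1_loss). Sketch:
//
//   at::Tensor a = at::tensor({0.5});
//   at::Tensor b = at::tensor({2.5});
//   at::Tensor l = at::smooth_l1_loss(a, b);  // |x| = 2 >= 1, so 2 - 0.5 = 1.5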
12802// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
12803inline at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
12804 return at::_ops::huber_loss_out::call(self, target, reduction, delta, out);
12805}
12806// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
12807inline at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
12808 return at::_ops::huber_loss_out::call(self, target, reduction, delta, out);
12809}
12810
12811// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
12812inline at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
12813 return at::_ops::huber_loss::call(self, target, reduction, delta);
12814}
12815
12816// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
12817inline at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
12818 return at::_ops::huber_loss_backward_out::call(grad_output, self, target, reduction, delta, grad_input);
12819}
12820// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
12821inline at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
12822 return at::_ops::huber_loss_backward_out::call(grad_output, self, target, reduction, delta, grad_input);
12823}
12824
12825// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
12826inline at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
12827 return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta);
12828}
12829
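// huber_loss is the delta-scaled form of the same piecewise curve:
// 0.5*x^2 when |x| <= delta, delta*(|x| - 0.5*delta) otherwise; it equals
// delta * smooth_l1_loss(..., beta=delta), so changing delta does not rescale
// the quadratic region. With the tensors above and the default delta = 1,
// at::huber_loss(a, b) is likewise 1 * (2 - 0.5) = 1.5.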
12830// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12831inline at::Tensor & soft_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
12832 return at::_ops::soft_margin_loss_out::call(self, target, reduction, out);
12833}
12834// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
12835inline at::Tensor & soft_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
12836 return at::_ops::soft_margin_loss_out::call(self, target, reduction, out);
12837}
12838
12839// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
12840inline at::Tensor soft_margin_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
12841 return at::_ops::soft_margin_loss::call(self, target, reduction);
12842}
12843
12844// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
12845inline at::Tensor & soft_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
12846 return at::_ops::soft_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input);
12847}
12848// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
12849inline at::Tensor & soft_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
12850 return at::_ops::soft_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input);
12851}
12852
12853// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
12854inline at::Tensor soft_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
12855 return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction);
12856}
12857
12858// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
12859inline at::Tensor & elu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) {
12860 return at::_ops::elu_out::call(self, alpha, scale, input_scale, out);
12861}
12862// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
12863inline at::Tensor & elu_outf(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
12864 return at::_ops::elu_out::call(self, alpha, scale, input_scale, out);
12865}
12866
12867// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
12868inline at::Tensor elu(const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) {
12869 return at::_ops::elu::call(self, alpha, scale, input_scale);
12870}
12871
12872// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
12873inline at::Tensor & elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
12874 return at::_ops::elu_backward_grad_input::call(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
12875}
12876// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
12877inline at::Tensor & elu_backward_outf(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
12878 return at::_ops::elu_backward_grad_input::call(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
12879}
12880
12881// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
12882inline at::Tensor elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
12883 return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
12884}
12885
12886// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
12887inline at::Tensor & elu_(at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) {
12888 return at::_ops::elu_::call(self, alpha, scale, input_scale);
12889}
12890
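// Illustrative usage (not @generated): the functional variant returns a new
// tensor, while the trailing-underscore variant mutates its input in place.
//
//   at::Tensor x = at::randn({4});
//   at::Tensor y = at::elu(x);   // alpha=1, scale=1, input_scale=1
//   at::elu_(x);                 // in-place; x now holds elu(x)
//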
12891// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
12892inline at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1) {
12893 return at::_ops::glu_out::call(self, dim, out);
12894}
12895// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
12896inline at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
12897 return at::_ops::glu_out::call(self, dim, out);
12898}
12899
12900// aten::glu(Tensor self, int dim=-1) -> Tensor
12901inline at::Tensor glu(const at::Tensor & self, int64_t dim=-1) {
12902 return at::_ops::glu::call(self, dim);
12903}
12904
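// Illustrative usage (not @generated): glu splits the input in half along
// `dim` into (a, b) and returns a * sigmoid(b), so the size along `dim`
// must be even.
//
//   at::Tensor x = at::randn({2, 6});
//   at::Tensor y = at::glu(x, /*dim=*/-1);  // y has shape {2, 3}
//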
12905// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
12906inline at::Tensor & glu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
12907 return at::_ops::glu_backward_grad_input::call(grad_output, self, dim, grad_input);
12908}
12909// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
12910inline at::Tensor & glu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
12911 return at::_ops::glu_backward_grad_input::call(grad_output, self, dim, grad_input);
12912}
12913
12914// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
12915inline at::Tensor glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
12916 return at::_ops::glu_backward::call(grad_output, self, dim);
12917}
12918
12919// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
12920inline at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
12921 return at::_ops::glu_jvp::call(glu, x, dx, dim);
12922}
12923
12924// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
12925inline at::Tensor glu_backward_jvp(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
12926 return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
12927}
12928
12929// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
12930inline at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self) {
12931 return at::_ops::hardsigmoid_out::call(self, out);
12932}
12933// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
12934inline at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out) {
12935 return at::_ops::hardsigmoid_out::call(self, out);
12936}
12937
12938// aten::hardsigmoid(Tensor self) -> Tensor
12939inline at::Tensor hardsigmoid(const at::Tensor & self) {
12940 return at::_ops::hardsigmoid::call(self);
12941}
12942
12943// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
12944inline at::Tensor & hardsigmoid_(at::Tensor & self) {
12945 return at::_ops::hardsigmoid_::call(self);
12946}
12947
12948// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
12949inline at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
12950 return at::_ops::hardsigmoid_backward_grad_input::call(grad_output, self, grad_input);
12951}
12952// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
12953inline at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
12954 return at::_ops::hardsigmoid_backward_grad_input::call(grad_output, self, grad_input);
12955}
12956
12957// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
12958inline at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self) {
12959 return at::_ops::hardsigmoid_backward::call(grad_output, self);
12960}
12961
12962// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
12963inline at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) {
12964 return at::_ops::hardtanh_out::call(self, min_val, max_val, out);
12965}
12966// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
12967inline at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
12968 return at::_ops::hardtanh_out::call(self, min_val, max_val, out);
12969}
12970
12971// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
12972inline at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) {
12973 return at::_ops::hardtanh::call(self, min_val, max_val);
12974}
12975
12976// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
12977inline at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
12978 return at::_ops::hardtanh_backward_grad_input::call(grad_output, self, min_val, max_val, grad_input);
12979}
12980// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
12981inline at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
12982 return at::_ops::hardtanh_backward_grad_input::call(grad_output, self, min_val, max_val, grad_input);
12983}
12984
12985// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
12986inline at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
12987 return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val);
12988}
12989
12990// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
12991inline at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) {
12992 return at::_ops::hardtanh_::call(self, min_val, max_val);
12993}
12994
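// Illustrative usage (not @generated): hardtanh clamps elementwise to
// [min_val, max_val].
//
//   at::Tensor x = at::randn({5});
//   at::Tensor y = at::hardtanh(x);                               // clamp to [-1, 1]
//   at::Tensor z = at::hardtanh(x, /*min_val=*/0, /*max_val=*/6); // ReLU6-style clamp
//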
12995// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
12996inline at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self) {
12997 return at::_ops::hardswish_out::call(self, out);
12998}
12999// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13000inline at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out) {
13001 return at::_ops::hardswish_out::call(self, out);
13002}
13003
13004// aten::hardswish(Tensor self) -> Tensor
13005inline at::Tensor hardswish(const at::Tensor & self) {
13006 return at::_ops::hardswish::call(self);
13007}
13008
13009// aten::hardswish_(Tensor(a!) self) -> Tensor(a!)
13010inline at::Tensor & hardswish_(at::Tensor & self) {
13011 return at::_ops::hardswish_::call(self);
13012}
13013
13014// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
13015inline at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
13016 return at::_ops::hardswish_backward::call(grad_output, self);
13017}
13018
13019// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
13020inline at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01) {
13021 return at::_ops::leaky_relu_out::call(self, negative_slope, out);
13022}
13023// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
13024inline at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
13025 return at::_ops::leaky_relu_out::call(self, negative_slope, out);
13026}
13027
13028// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
13029inline at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01) {
13030 return at::_ops::leaky_relu::call(self, negative_slope);
13031}
13032
13033// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
13034inline at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
13035 return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input);
13036}
13037// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
13038inline at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
13039 return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input);
13040}
13041
13042// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
13043inline at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
13044 return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result);
13045}
13046
13047// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
13048inline at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01) {
13049 return at::_ops::leaky_relu_::call(self, negative_slope);
13050}
13051
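// Illustrative usage (not @generated):
//
//   at::Tensor x = at::randn({5});
//   at::Tensor y = at::leaky_relu(x);            // negative_slope=0.01
//   at::leaky_relu_(x, /*negative_slope=*/0.2);  // in-place
//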
13052// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13053inline at::Tensor & log_sigmoid_out(at::Tensor & out, const at::Tensor & self) {
13054 return at::_ops::log_sigmoid_out::call(self, out);
13055}
13056// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
13057inline at::Tensor & log_sigmoid_outf(const at::Tensor & self, at::Tensor & out) {
13058 return at::_ops::log_sigmoid_out::call(self, out);
13059}
13060
13061// aten::log_sigmoid(Tensor self) -> Tensor
13062inline at::Tensor log_sigmoid(const at::Tensor & self) {
13063 return at::_ops::log_sigmoid::call(self);
13064}
13065
13066// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
13067inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) {
13068 return at::_ops::log_sigmoid_forward_output::call(self, output, buffer);
13069}
13070// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
13071inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
13072 return at::_ops::log_sigmoid_forward_output::call(self, output, buffer);
13073}
13074
13075// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
13076inline ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(const at::Tensor & self) {
13077 return at::_ops::log_sigmoid_forward::call(self);
13078}
13079
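// Illustrative usage (not @generated): log_sigmoid_forward additionally
// returns a buffer of intermediate values that the backward consumes (it is
// only populated by the CPU kernel).
//
//   at::Tensor x = at::randn({5});
//   auto [output, buffer] = at::log_sigmoid_forward(x);
//   at::Tensor gx = at::log_sigmoid_backward(at::ones_like(x), x, buffer);
//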
13080// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
13081inline at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
13082 return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input);
13083}
13084// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
13085inline at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
13086 return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input);
13087}
13088
13089// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
13090inline at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
13091 return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer);
13092}
13093
13094// aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
13095inline at::Tensor & rrelu_with_noise_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
13096 return at::_ops::rrelu_with_noise_out::call(self, noise, lower, upper, training, generator, out);
13097}
13098// aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
13099inline at::Tensor & rrelu_with_noise_outf(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out) {
13100 return at::_ops::rrelu_with_noise_out::call(self, noise, lower, upper, training, generator, out);
13101}
13102
13103// aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
13104inline at::Tensor rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
13105 return at::_ops::rrelu_with_noise::call(self, noise, lower, upper, training, generator);
13106}
13107
13108// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
13109inline at::Tensor rrelu_with_noise_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
13110 return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result);
13111}
13112
13113// aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
13114inline at::Tensor & rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
13115 return at::_ops::rrelu_with_noise_::call(self, noise, lower, upper, training, generator);
13116}
13117
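// Illustrative usage (not @generated): `noise` is expected to match the shape
// of `self`; when training=true the sampled negative slopes are written into
// it so the backward can reuse them.
//
//   at::Tensor x = at::randn({5});
//   at::Tensor noise = at::empty_like(x);
//   at::Tensor y = at::rrelu_with_noise(x, noise);  // lower=0.125, upper=1/3, eval mode
//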
13118// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
13119inline at::Tensor & softplus_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) {
13120 return at::_ops::softplus_out::call(self, beta, threshold, out);
13121}
13122// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
13123inline at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
13124 return at::_ops::softplus_out::call(self, beta, threshold, out);
13125}
13126
13127// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
13128inline at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) {
13129 return at::_ops::softplus::call(self, beta, threshold);
13130}
13131
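// Illustrative usage (not @generated): softplus computes
// log(1 + exp(beta * x)) / beta, reverting to the identity when
// beta * x > threshold for numerical stability.
//
//   at::Tensor x = at::randn({5});
//   at::Tensor y = at::softplus(x);  // beta=1, threshold=20
//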
13132// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
13133inline at::Tensor & softplus_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
13134 return at::_ops::softplus_backward_grad_input::call(grad_output, self, beta, threshold, grad_input);
13135}
13136// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
13137inline at::Tensor & softplus_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
13138 return at::_ops::softplus_backward_grad_input::call(grad_output, self, beta, threshold, grad_input);
13139}
13140
13141// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor
13142inline at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
13143 return at::_ops::softplus_backward::call(grad_output, self, beta, threshold);
13144}
13145
13146// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
13147inline at::Tensor & softshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) {
13148 return at::_ops::softshrink_out::call(self, lambd, out);
13149}
13150// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
13151inline at::Tensor & softshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
13152 return at::_ops::softshrink_out::call(self, lambd, out);
13153}
13154
13155// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
13156inline at::Tensor softshrink(const at::Tensor & self, const at::Scalar & lambd=0.5) {
13157 return at::_ops::softshrink::call(self, lambd);
13158}
13159
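// Illustrative usage (not @generated): softshrink zeroes values in
// [-lambd, lambd] and shrinks the rest toward zero by lambd.
//
//   at::Tensor y = at::softshrink(at::randn({5}));  // lambd=0.5
//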
13160// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
13161inline at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
13162 return at::_ops::softshrink_backward_grad_input::call(grad_output, self, lambd, grad_input);
13163}
13164// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
13165inline at::Tensor & softshrink_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
13166 return at::_ops::softshrink_backward_grad_input::call(grad_output, self, lambd, grad_input);
13167}
13168
13169// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
13170inline at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
13171 return at::_ops::softshrink_backward::call(grad_output, self, lambd);
13172}
13173
13174// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
13175inline at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
13176 return at::_ops::adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13177}
13178namespace symint {
13179 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13180 at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
13181 return at::_ops::adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13182 }
13183}
13184
13185// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
13186inline at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
13187 return at::_ops::adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13188}
13189namespace symint {
13190 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13191 at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
13192 return at::_ops::adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13193 }
13194}
13195
13196// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
13197inline at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
13198 return at::_ops::adaptive_avg_pool2d_out::call(self, output_size, out);
13199}
13200namespace symint {
13201 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13202 at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
13203 return at::_ops::adaptive_avg_pool2d_out::call(self, output_size, out);
13204 }
13205}
13206
13207// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
13208inline at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
13209 return at::_ops::adaptive_avg_pool2d_out::call(self, output_size, out);
13210}
13211namespace symint {
13212 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13213 at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
13214 return at::_ops::adaptive_avg_pool2d_out::call(self, output_size, out);
13215 }
13216}
13217
13218// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
13219inline at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
13220 return at::_ops::adaptive_avg_pool2d::call(self, c10::fromIntArrayRefSlow(output_size));
13221}
13222namespace symint {
13223 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13224 at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
13225 return at::_ops::adaptive_avg_pool2d::call(self, c10::fromIntArrayRefSlow(output_size));
13226 }
13227}
13228
13229// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
13230inline at::Tensor adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13231 return at::_ops::adaptive_avg_pool2d::call(self, output_size);
13232}
13233namespace symint {
13234 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13235 at::Tensor adaptive_avg_pool2d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13236 return at::_ops::adaptive_avg_pool2d::call(self, output_size);
13237 }
13238}
13239
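// Illustrative usage (not @generated): the plain overload takes concrete
// int64_t sizes, while the `at::symint` namespace exposes both overloads
// behind a tag type so callers can select int64_t or c10::SymInt explicitly.
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor y = at::adaptive_avg_pool2d(x, {1, 1});                 // y: {1, 3, 1, 1}
//   at::Tensor z = at::symint::adaptive_avg_pool2d<int64_t>(x, {1, 1});
//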
13240// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
13241inline at::Tensor mkldnn_adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
13242 return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size);
13243}
13244
13245// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
13246inline at::Tensor & mkldnn_adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
13247 return at::_ops::mkldnn_adaptive_avg_pool2d_out::call(self, output_size, out);
13248}
13249// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
13250inline at::Tensor & mkldnn_adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
13251 return at::_ops::mkldnn_adaptive_avg_pool2d_out::call(self, output_size, out);
13252}
13253
13254// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
13255inline at::Tensor mkldnn_adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
13256 return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self);
13257}
13258
13259// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
13260inline at::Tensor _adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
13261 return at::_ops::_adaptive_avg_pool2d::call(self, c10::fromIntArrayRefSlow(output_size));
13262}
13263namespace symint {
13264 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13265 at::Tensor _adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
13266 return at::_ops::_adaptive_avg_pool2d::call(self, c10::fromIntArrayRefSlow(output_size));
13267 }
13268}
13269
13270// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
13271inline at::Tensor _adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13272 return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
13273}
13274namespace symint {
13275 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13276 at::Tensor _adaptive_avg_pool2d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13277 return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
13278 }
13279}
13280
13281// aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor
13282inline at::Tensor _adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
13283 return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self);
13284}
13285
13286// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
13287inline at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
13288 return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13289}
13290namespace symint {
13291 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13292 at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
13293 return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13294 }
13295}
13296
13297// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
13298inline at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
13299 return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13300}
13301namespace symint {
13302 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13303 at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
13304 return at::_ops::adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
13305 }
13306}
13307
13308// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
13309inline at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
13310 return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
13311}
13312namespace symint {
13313 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13314 at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
13315 return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
13316 }
13317}
13318
13319// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
13320inline at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
13321 return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
13322}
13323namespace symint {
13324 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13325 at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
13326 return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out);
13327 }
13328}
13329
13330// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
13331inline at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
13332 return at::_ops::adaptive_avg_pool3d::call(self, c10::fromIntArrayRefSlow(output_size));
13333}
13334namespace symint {
13335 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13336 at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
13337 return at::_ops::adaptive_avg_pool3d::call(self, c10::fromIntArrayRefSlow(output_size));
13338 }
13339}
13340
13341// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
13342inline at::Tensor adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13343 return at::_ops::adaptive_avg_pool3d::call(self, output_size);
13344}
13345namespace symint {
13346 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13347 at::Tensor adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13348 return at::_ops::adaptive_avg_pool3d::call(self, output_size);
13349 }
13350}
13351
13352// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
13353inline at::Tensor _adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
13354 return at::_ops::_adaptive_avg_pool3d::call(self, c10::fromIntArrayRefSlow(output_size));
13355}
13356namespace symint {
13357 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
13358 at::Tensor _adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
13359 return at::_ops::_adaptive_avg_pool3d::call(self, c10::fromIntArrayRefSlow(output_size));
13360 }
13361}
13362
13363// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor
13364inline at::Tensor _adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13365 return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
13366}
13367namespace symint {
13368 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
13369 at::Tensor _adaptive_avg_pool3d(const at::Tensor & self, c10::SymIntArrayRef output_size) {
13370 return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
13371 }
13372}
13373
13374// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
13375inline at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
13376 return at::_ops::adaptive_avg_pool3d_backward_grad_input::call(grad_output, self, grad_input);
13377}
13378// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
13379inline at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
13380 return at::_ops::adaptive_avg_pool3d_backward_grad_input::call(grad_output, self, grad_input);
13381}
13382
13383// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
13384inline at::Tensor _adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self) {
13385 return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
13386}
13387
13388// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13389inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) {
13390 return at::_ops::adaptive_max_pool2d_out::call(self, output_size, out, indices);
13391}
13392// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13393inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
13394 return at::_ops::adaptive_max_pool2d_out::call(self, output_size, out, indices);
13395}
13396
13397// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
13398inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) {
13399 return at::_ops::adaptive_max_pool2d::call(self, output_size);
13400}
13401
13402// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13403inline at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
13404 return at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output, self, indices, grad_input);
13405}
13406// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13407inline at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
13408 return at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output, self, indices, grad_input);
13409}
13410
13411// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
13412inline at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
13413 return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
13414}
13415
13416// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13417inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) {
13418 return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices);
13419}
13420// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13421inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
13422 return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices);
13423}
13424
13425// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
13426inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) {
13427 return at::_ops::adaptive_max_pool3d::call(self, output_size);
13428}
13429
13430// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13431inline at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
13432 return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input);
13433}
13434// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13435inline at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
13436 return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input);
13437}
13438
13439// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
13440inline at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
13441 return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
13442}
13443
13444// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
13445inline at::Tensor & avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) {
13446 return at::_ops::avg_pool2d_out::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
13447}
13448// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
13449inline at::Tensor & avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
13450 return at::_ops::avg_pool2d_out::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
13451}
13452
13453// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
13454inline at::Tensor avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) {
13455 return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
13456}
13457
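// Illustrative usage (not @generated): an empty `stride` (the default) makes
// the stride equal to `kernel_size`, i.e. non-overlapping windows.
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor y = at::avg_pool2d(x, /*kernel_size=*/{2, 2});  // y: {1, 3, 4, 4}
//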
13458// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
13459inline at::Tensor & avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
13460 return at::_ops::avg_pool2d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
13461}
13462// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
13463inline at::Tensor & avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
13464 return at::_ops::avg_pool2d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
13465}
13466
13467// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
13468inline at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
13469 return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
13470}
13471
13472// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
13473inline at::Tensor & avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) {
13474 return at::_ops::avg_pool3d_out::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
13475}
13476// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
13477inline at::Tensor & avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) {
13478 return at::_ops::avg_pool3d_out::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out);
13479}
13480
13481// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
13482inline at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) {
13483 return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
13484}
13485
13486// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
13487inline at::Tensor & avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
13488 return at::_ops::avg_pool3d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
13489}
13490// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
13491inline at::Tensor & avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
13492 return at::_ops::avg_pool3d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
13493}
13494
13495// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
13496inline at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
13497 return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
13498}
13499
13500// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13501inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
13502 return at::_ops::fractional_max_pool2d_output::call(self, kernel_size, output_size, random_samples, output, indices);
13503}
13504// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13505inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
13506 return at::_ops::fractional_max_pool2d_output::call(self, kernel_size, output_size, random_samples, output, indices);
13507}
13508
13509// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
13510inline ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
13511 return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples);
13512}
13513
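// Illustrative usage (not @generated; the random_samples shape here is our
// reading of the kernel's contract): one (u, v) pair in [0, 1) per batch
// entry and channel drives the pseudo-random window placement.
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor samples = at::rand({1, 3, 2});
//   auto [out, idx] = at::fractional_max_pool2d(x, /*kernel_size=*/{2, 2},
//                                               /*output_size=*/{4, 4}, samples);
//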
13514// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13515inline at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
13516 return at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
13517}
13518// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13519inline at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
13520 return at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
13521}
13522
13523// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
13524inline at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
13525 return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices);
13526}
13527
13528// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13529inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
13530 return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices);
13531}
13532// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13533inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
13534 return at::_ops::fractional_max_pool3d_output::call(self, kernel_size, output_size, random_samples, output, indices);
13535}
13536
13537// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
13538inline ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
13539 return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
13540}
13541
13542// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13543inline at::Tensor & fractional_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
13544 return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
13545}
13546// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
13547inline at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
13548 return at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output, self, kernel_size, output_size, indices, grad_input);
13549}
13550
13551// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
13552inline at::Tensor fractional_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
13553 return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
13554}
13555
13556// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13557inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
13558 return at::_ops::max_pool2d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
13559}
13560// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
13561inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
13562 return at::_ops::max_pool2d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
13563}
13564
13565// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
13566inline ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
13567 return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
13568}
13569
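// Illustrative usage (not @generated): the second result holds the flattened
// argmax indices, which the backward (and max_unpool) consume.
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   auto [vals, idx] = at::max_pool2d_with_indices(x, /*kernel_size=*/{2, 2});
//   at::Tensor gx = at::max_pool2d_with_indices_backward(
//       at::ones_like(vals), x, {2, 2}, {2, 2}, {0, 0}, {1, 1}, false, idx);
//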
// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    return at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
inline at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
    return at::_ops::max_pool3d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}
// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
    return at::_ops::max_pool3d_with_indices_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
}

// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
inline ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
    return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}

// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    return at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}
// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
    return at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input);
}

// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
inline at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
    return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}

// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & max_unpool2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
    return at::_ops::max_unpool2d_out::call(self, indices, output_size, out);
}
// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & max_unpool2d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) {
    return at::_ops::max_unpool2d_out::call(self, indices, output_size, out);
}

// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
inline at::Tensor max_unpool2d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
    return at::_ops::max_unpool2d::call(self, indices, output_size);
}

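// Example (illustrative, not part of the generated header): max_unpool2d
// inverts max_pool2d_with_indices by scattering the pooled values back to
// the positions recorded in `indices`; every other position becomes zero.
// Reusing `input` from the example above:
//
//   auto [pooled, indices] = at::max_pool2d_with_indices(input, {2, 2}, {2, 2});
//   at::Tensor restored = at::max_unpool2d(pooled, indices, /*output_size=*/{8, 8});
//   // restored has shape [1, 3, 8, 8].
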
// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & max_unpool3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
    return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out);
}
// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & max_unpool3d_outf(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::max_unpool3d_out::call(self, indices, output_size, stride, padding, out);
}

// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
inline at::Tensor max_unpool3d(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
    return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding);
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d_out::call(self, padding, out);
  }
}

// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad1d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad1d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad1d_out::call(self, padding, out);
  }
}

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
inline at::Tensor reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d::call(self, c10::fromIntArrayRefSlow(padding));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d::call(self, c10::fromIntArrayRefSlow(padding));
  }
}

// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor
inline at::Tensor reflection_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d::call(self, padding);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor reflection_pad1d(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d::call(self, padding);
  }
}

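// NOTE: Operators whose schema takes SymInt arguments are generated in two
// forms: a plain overload taking at::IntArrayRef, which converts through
// c10::fromIntArrayRefSlow, and a `*_symint` overload taking
// c10::SymIntArrayRef directly. The nested `symint` namespace exposes the
// same pair under one name, selected by an explicit template argument via
// SFINAE. An illustrative sketch (not part of the generated header):
//
//   at::Tensor a = at::symint::reflection_pad1d<int64_t>(self, {1, 1});
//   std::vector<c10::SymInt> sym_pad = {c10::SymInt(1), c10::SymInt(1)};
//   at::Tensor b = at::symint::reflection_pad1d<c10::SymInt>(self, sym_pad);
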
// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
  }
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
  }
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
  }
}

// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
  }
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
inline at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
  }
}

// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
inline at::Tensor reflection_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
  }
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d_out::call(self, padding, out);
  }
}

// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad2d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad2d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad2d_out::call(self, padding, out);
  }
}

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
inline at::Tensor reflection_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d::call(self, c10::fromIntArrayRefSlow(padding));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor reflection_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d::call(self, c10::fromIntArrayRefSlow(padding));
  }
}

// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor
inline at::Tensor reflection_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d::call(self, padding);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor reflection_pad2d(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d::call(self, padding);
  }
}

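// Example (illustrative, not part of the generated header): for 2-D padding
// the four entries are {left, right, top, bottom}, applied to the last two
// dimensions; reflection padding mirrors the input around the border
// element without repeating it.
//
//   at::Tensor img = at::randn({1, 3, 32, 32});
//   at::Tensor padded = at::reflection_pad2d(img, {1, 1, 2, 2});
//   // padded has shape [1, 3, 36, 34].
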
// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
  }
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
  }
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
  }
}

// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
  }
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
inline at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
  }
}

// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
inline at::Tensor reflection_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
  }
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d_out::call(self, padding, out);
  }
}

// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad3d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad3d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::reflection_pad3d_out::call(self, padding, out);
  }
}

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
inline at::Tensor reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d::call(self, c10::fromIntArrayRefSlow(padding));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d::call(self, c10::fromIntArrayRefSlow(padding));
  }
}

// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor
inline at::Tensor reflection_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d::call(self, padding);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor reflection_pad3d(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d::call(self, padding);
  }
}

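// Example (illustrative, not part of the generated header): for 3-D padding
// the six entries are {left, right, top, bottom, front, back}, applied to
// the last three dimensions of a 5-D (N, C, D, H, W) input.
//
//   at::Tensor vol = at::randn({1, 1, 4, 8, 8});
//   at::Tensor padded = at::reflection_pad3d(vol, {1, 1, 1, 1, 1, 1});
//   // padded has shape [1, 1, 6, 10, 10].
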
// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
  }
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
  }
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
  }
}

// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::reflection_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
  }
}

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
inline at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
  }
}

// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
inline at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
  }
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & replication_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & replication_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
    return at::_ops::replication_pad1d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
  }
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & replication_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::replication_pad1d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::replication_pad1d_out::call(self, padding, out);
  }
}

// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & replication_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::replication_pad1d_out::call(self, padding, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & replication_pad1d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
    return at::_ops::replication_pad1d_out::call(self, padding, out);
  }
}

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
inline at::Tensor replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::replication_pad1d::call(self, c10::fromIntArrayRefSlow(padding));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor replication_pad1d(const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::replication_pad1d::call(self, c10::fromIntArrayRefSlow(padding));
  }
}

// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor
inline at::Tensor replication_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::replication_pad1d::call(self, padding);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor replication_pad1d(const at::Tensor & self, c10::SymIntArrayRef padding) {
    return at::_ops::replication_pad1d::call(self, padding);
  }
}

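// Example (illustrative, not part of the generated header): replication
// padding repeats the border value, whereas reflection padding mirrors
// around it.
//
//   at::Tensor x = at::arange(4, at::kFloat).reshape({1, 1, 4}); // 0 1 2 3
//   at::reflection_pad1d(x, {2, 2});  // 2 1 0 1 2 3 2 1
//   at::replication_pad1d(x, {2, 2}); // 0 0 0 1 2 3 3 3
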
14102// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14103inline at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14104 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14105}
14106namespace symint {
14107 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14108 at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14109 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14110 }
14111}
14112
14113// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14114inline at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
14115 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14116}
14117namespace symint {
14118 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14119 at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
14120 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14121 }
14122}
14123
14124// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14125inline at::Tensor & replication_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14126 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
14127}
14128namespace symint {
14129 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14130 at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14131 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
14132 }
14133}
14134
14135// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14136inline at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
14137 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
14138}
14139namespace symint {
14140 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14141 at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
14142 return at::_ops::replication_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
14143 }
14144}
14145
14146// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
14147inline at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14148 return at::_ops::replication_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
14149}
14150namespace symint {
14151 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14152 at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14153 return at::_ops::replication_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
14154 }
14155}
14156
14157// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
14158inline at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14159 return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
14160}
14161namespace symint {
14162 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14163 at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14164 return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
14165 }
14166}
14167
14168// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
14169inline at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
14170 return at::_ops::replication_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14171}
14172namespace symint {
14173 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14174 at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
14175 return at::_ops::replication_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14176 }
14177}
14178
14179// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
14180inline at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
14181 return at::_ops::replication_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14182}
14183namespace symint {
14184 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14185 at::Tensor & replication_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
14186 return at::_ops::replication_pad2d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14187 }
14188}
14189
14190// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
14191inline at::Tensor & replication_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
14192 return at::_ops::replication_pad2d_out::call(self, padding, out);
14193}
14194namespace symint {
14195 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14196 at::Tensor & replication_pad2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
14197 return at::_ops::replication_pad2d_out::call(self, padding, out);
14198 }
14199}
14200
14201// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)
14202inline at::Tensor & replication_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
14203 return at::_ops::replication_pad2d_out::call(self, padding, out);
14204}
14205namespace symint {
14206 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14207 at::Tensor & replication_pad2d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
14208 return at::_ops::replication_pad2d_out::call(self, padding, out);
14209 }
14210}
14211
14212// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
14213inline at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
14214 return at::_ops::replication_pad2d::call(self, c10::fromIntArrayRefSlow(padding));
14215}
14216namespace symint {
14217 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14218 at::Tensor replication_pad2d(const at::Tensor & self, at::IntArrayRef padding) {
14219 return at::_ops::replication_pad2d::call(self, c10::fromIntArrayRefSlow(padding));
14220 }
14221}
14222
14223// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor
14224inline at::Tensor replication_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
14225 return at::_ops::replication_pad2d::call(self, padding);
14226}
14227namespace symint {
14228 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14229 at::Tensor replication_pad2d(const at::Tensor & self, c10::SymIntArrayRef padding) {
14230 return at::_ops::replication_pad2d::call(self, padding);
14231 }
14232}
14233
14234// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14235inline at::Tensor & replication_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14236 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14237}
14238namespace symint {
14239 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14240 at::Tensor & replication_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14241 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14242 }
14243}
14244
14245// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14246inline at::Tensor & replication_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
14247 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14248}
14249namespace symint {
14250 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14251 at::Tensor & replication_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
14252 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14253 }
14254}
14255
14256// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14257inline at::Tensor & replication_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14258 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
14259}
14260namespace symint {
14261 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14262 at::Tensor & replication_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14263 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
14264 }
14265}
14266
14267// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14268inline at::Tensor & replication_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
14269 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
14270}
14271namespace symint {
14272 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14273 at::Tensor & replication_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
14274 return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
14275 }
14276}
14277
14278// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
14279inline at::Tensor replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14280 return at::_ops::replication_pad2d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
14281}
14282namespace symint {
14283 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14284 at::Tensor replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14285 return at::_ops::replication_pad2d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
14286 }
14287}
14288
14289// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
14290inline at::Tensor replication_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14291 return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
14292}
14293namespace symint {
14294 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14295 at::Tensor replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14296 return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
14297 }
14298}
14299
14300// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
14301inline at::Tensor & replication_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
14302 return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14303}
14304namespace symint {
14305 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14306 at::Tensor & replication_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
14307 return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14308 }
14309}
14310
14311// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
14312inline at::Tensor & replication_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
14313 return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14314}
14315namespace symint {
14316 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14317 at::Tensor & replication_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
14318 return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
14319 }
14320}
14321
14322// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
14323inline at::Tensor & replication_pad3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
14324 return at::_ops::replication_pad3d_out::call(self, padding, out);
14325}
14326namespace symint {
14327 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14328 at::Tensor & replication_pad3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
14329 return at::_ops::replication_pad3d_out::call(self, padding, out);
14330 }
14331}
14332
14333// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
14334inline at::Tensor & replication_pad3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
14335 return at::_ops::replication_pad3d_out::call(self, padding, out);
14336}
14337namespace symint {
14338 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14339 at::Tensor & replication_pad3d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
14340 return at::_ops::replication_pad3d_out::call(self, padding, out);
14341 }
14342}
14343
14344// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
14345inline at::Tensor replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
14346 return at::_ops::replication_pad3d::call(self, c10::fromIntArrayRefSlow(padding));
14347}
14348namespace symint {
14349 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14350 at::Tensor replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
14351 return at::_ops::replication_pad3d::call(self, c10::fromIntArrayRefSlow(padding));
14352 }
14353}
14354
14355// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
14356inline at::Tensor replication_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
14357 return at::_ops::replication_pad3d::call(self, padding);
14358}
14359namespace symint {
14360 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14361 at::Tensor replication_pad3d(const at::Tensor & self, c10::SymIntArrayRef padding) {
14362 return at::_ops::replication_pad3d::call(self, padding);
14363 }
14364}
14365
14366// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14367inline at::Tensor & replication_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14368 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14369}
14370namespace symint {
14371 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14372 at::Tensor & replication_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14373 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14374 }
14375}
14376
14377// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14378inline at::Tensor & replication_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
14379 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14380}
14381namespace symint {
14382 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14383 at::Tensor & replication_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
14384 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
14385 }
14386}
14387
14388// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14389inline at::Tensor & replication_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14390 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
14391}
14392namespace symint {
14393 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14394 at::Tensor & replication_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14395 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
14396 }
14397}
14398
14399// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
14400inline at::Tensor & replication_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
14401 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
14402}
14403namespace symint {
14404 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14405 at::Tensor & replication_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
14406 return at::_ops::replication_pad3d_backward_grad_input::call(grad_output, self, padding, grad_input);
14407 }
14408}
14409
14410// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
14411inline at::Tensor replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14412 return at::_ops::replication_pad3d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
14413}
14414namespace symint {
14415 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14416 at::Tensor replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
14417 return at::_ops::replication_pad3d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
14418 }
14419}
14420
14421// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
14422inline at::Tensor replication_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14423 return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
14424}
14425namespace symint {
14426 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14427 at::Tensor replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
14428 return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
14429 }
14430}
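
// Editorial sketch: the `at::symint::` overloads above are disambiguated by
// an explicit template argument, selecting the `int64_t` (concrete-shape) or
// `c10::SymInt` (symbolic-shape) signature via SFINAE:
//
//   at::Tensor self = at::rand({1, 1, 4, 4, 4});
//   at::Tensor grad_out = at::rand({1, 1, 6, 6, 6});
//   at::Tensor g = at::symint::replication_pad3d_backward<int64_t>(
//       grad_out, self, {1, 1, 1, 1, 1, 1});
//   // With symbolic sizes in hand, instantiate with c10::SymInt instead and
//   // pass a c10::SymIntArrayRef.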
14431
14432// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
14433inline at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
14434 return at::_ops::_pad_circular::call(self, c10::fromIntArrayRefSlow(pad));
14435}
14436namespace symint {
14437 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14438 at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
14439 return at::_ops::_pad_circular::call(self, c10::fromIntArrayRefSlow(pad));
14440 }
14441}
14442
14443// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
14444inline at::Tensor _pad_circular_symint(const at::Tensor & self, c10::SymIntArrayRef pad) {
14445 return at::_ops::_pad_circular::call(self, pad);
14446}
14447namespace symint {
14448 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14449 at::Tensor _pad_circular(const at::Tensor & self, c10::SymIntArrayRef pad) {
14450 return at::_ops::_pad_circular::call(self, pad);
14451 }
14452}
14453
14454// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
14455inline at::Tensor _pad_enum(const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value=c10::nullopt) {
14456 return at::_ops::_pad_enum::call(self, c10::fromIntArrayRefSlow(pad), mode, value);
14457}
14458namespace symint {
14459 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14460 at::Tensor _pad_enum(const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value=c10::nullopt) {
14461 return at::_ops::_pad_enum::call(self, c10::fromIntArrayRefSlow(pad), mode, value);
14462 }
14463}
14464
14465// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor
14466inline at::Tensor _pad_enum_symint(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value=c10::nullopt) {
14467 return at::_ops::_pad_enum::call(self, pad, mode, value);
14468}
14469namespace symint {
14470 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14471 at::Tensor _pad_enum(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value=c10::nullopt) {
14472 return at::_ops::_pad_enum::call(self, pad, mode, value);
14473 }
14474}
14475
14476// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
14477inline at::Tensor pad(const at::Tensor & self, at::IntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt) {
14478 return at::_ops::pad::call(self, c10::fromIntArrayRefSlow(pad), mode, value);
14479}
14480namespace symint {
14481 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14482 at::Tensor pad(const at::Tensor & self, at::IntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt) {
14483 return at::_ops::pad::call(self, c10::fromIntArrayRefSlow(pad), mode, value);
14484 }
14485}
14486
14487// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor
14488inline at::Tensor pad_symint(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt) {
14489 return at::_ops::pad::call(self, pad, mode, value);
14490}
14491namespace symint {
14492 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14493 at::Tensor pad(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt) {
14494 return at::_ops::pad::call(self, pad, mode, value);
14495 }
14496}
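
// A minimal usage sketch (editorial): `at::pad` is the string-mode entry
// point that routes to constant, reflect, replicate, or circular padding;
// `value` is only meaningful for "constant" mode.
//
//   at::Tensor t = at::rand({1, 1, 4, 4});
//   at::Tensor a = at::pad(t, {1, 1, 1, 1});                  // constant, 0-filled
//   at::Tensor b = at::pad(t, {1, 1, 1, 1}, "reflect");
//   at::Tensor c = at::pad(t, {1, 1, 1, 1}, "constant", 5.0);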
14497
14498// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14499inline at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14500 return at::_ops::upsample_linear1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14501}
14502namespace symint {
14503 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14504 at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14505 return at::_ops::upsample_linear1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14506 }
14507}
14508
14509// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14510inline at::Tensor upsample_linear1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14511 return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
14512}
14513namespace symint {
14514 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14515 at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14516 return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
14517 }
14518}
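
// A minimal usage sketch (editorial) for the `.vec` overload above; exactly
// one of `output_size` and `scale_factors` should be given, the other passed
// as nullopt.
//
//   at::Tensor x = at::rand({1, 3, 8});                       // (N, C, L)
//   std::vector<int64_t> out_size = {16};
//   at::Tensor y = at::upsample_linear1d(x, out_size, /*align_corners=*/false,
//                                        c10::nullopt);
//   std::vector<double> scales = {2.0};                       // or, by scale:
//   at::Tensor z = at::upsample_linear1d(x, c10::nullopt, false, scales);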
14519
14520// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14521inline at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14522 return at::_ops::upsample_bilinear2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14523}
14524namespace symint {
14525 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14526 at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14527 return at::_ops::upsample_bilinear2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14528 }
14529}
14530
14531// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14532inline at::Tensor upsample_bilinear2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14533 return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
14534}
14535namespace symint {
14536 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14537 at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14538 return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
14539 }
14540}
14541
14542// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14543inline at::Tensor _upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14544 return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14545}
14546namespace symint {
14547 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14548 at::Tensor _upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14549 return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14550 }
14551}
14552
14553// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14554inline at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14555 return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
14556}
14557namespace symint {
14558 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14559 at::Tensor _upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14560 return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
14561 }
14562}
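
// Editorial sketch: the `_aa` variants apply anti-aliasing, which mainly
// matters when downscaling (it closely tracks Pillow-style resizing):
//
//   at::Tensor img = at::rand({1, 3, 256, 256});
//   std::vector<int64_t> out_size = {64, 64};
//   at::Tensor small = at::_upsample_bilinear2d_aa(
//       img, out_size, /*align_corners=*/false, c10::nullopt);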
14563
14564// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14565inline at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14566 return at::_ops::upsample_trilinear3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14567}
14568namespace symint {
14569 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14570 at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14571 return at::_ops::upsample_trilinear3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14572 }
14573}
14574
14575// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14576inline at::Tensor upsample_trilinear3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14577 return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
14578}
14579namespace symint {
14580 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14581 at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14582 return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
14583 }
14584}
14585
14586// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14587inline at::Tensor upsample_bicubic2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14588 return at::_ops::upsample_bicubic2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14589}
14590namespace symint {
14591 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14592 at::Tensor upsample_bicubic2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14593 return at::_ops::upsample_bicubic2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14594 }
14595}
14596
14597// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14598inline at::Tensor upsample_bicubic2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14599 return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
14600}
14601namespace symint {
14602 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14603 at::Tensor upsample_bicubic2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14604 return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
14605 }
14606}
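
// Editorial sketch: bicubic upsampling of a 4-D (N, C, H, W) input; like the
// linear family (and unlike the nearest modes below) it takes `align_corners`.
//
//   at::Tensor img = at::rand({2, 3, 10, 10});
//   std::vector<int64_t> out_size = {20, 20};
//   at::Tensor up = at::upsample_bicubic2d(img, out_size,
//                                          /*align_corners=*/true, c10::nullopt);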
14607
14608// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14609inline at::Tensor _upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14610 return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14611}
14612namespace symint {
14613 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14614 at::Tensor _upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14615 return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, align_corners, scale_factors);
14616 }
14617}
14618
14619// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
14620inline at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14621 return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
14622}
14623namespace symint {
14624 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14625 at::Tensor _upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
14626 return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
14627 }
14628}
14629
14630// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14631inline at::Tensor upsample_nearest1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14632 return at::_ops::upsample_nearest1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14633}
14634namespace symint {
14635 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14636 at::Tensor upsample_nearest1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14637 return at::_ops::upsample_nearest1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14638 }
14639}
14640
14641// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14642inline at::Tensor upsample_nearest1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14643 return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
14644}
14645namespace symint {
14646 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14647 at::Tensor upsample_nearest1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14648 return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
14649 }
14650}
14651
14652// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14653inline at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14654 return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14655}
14656namespace symint {
14657 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14658 at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14659 return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14660 }
14661}
14662
14663// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14664inline at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14665 return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
14666}
14667namespace symint {
14668 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14669 at::Tensor _upsample_nearest_exact1d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14670 return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
14671 }
14672}
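
// Editorial note: `upsample_nearest*` keeps PyTorch's historical index
// computation, while `_upsample_nearest_exact*` uses the pixel-centre
// convention ("nearest-exact" in torch.nn.functional.interpolate), matching
// Scikit-Image/PIL nearest-neighbour results. Sketch:
//
//   at::Tensor x = at::rand({1, 1, 5});
//   std::vector<int64_t> out_size = {10};
//   at::Tensor a = at::upsample_nearest1d(x, out_size, c10::nullopt);
//   at::Tensor b = at::_upsample_nearest_exact1d(x, out_size, c10::nullopt);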
14673
14674// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14675inline at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14676 return at::_ops::upsample_nearest2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14677}
14678namespace symint {
14679 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14680 at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14681 return at::_ops::upsample_nearest2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14682 }
14683}
14684
14685// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14686inline at::Tensor upsample_nearest2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14687 return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
14688}
14689namespace symint {
14690 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14691 at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14692 return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
14693 }
14694}
14695
14696// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14697inline at::Tensor _upsample_nearest_exact2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14698 return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14699}
14700namespace symint {
14701 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14702 at::Tensor _upsample_nearest_exact2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14703 return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14704 }
14705}
14706
14707// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14708inline at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14709 return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
14710}
14711namespace symint {
14712 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14713 at::Tensor _upsample_nearest_exact2d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14714 return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
14715 }
14716}
14717
14718// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14719inline at::Tensor upsample_nearest3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14720 return at::_ops::upsample_nearest3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14721}
14722namespace symint {
14723 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14724 at::Tensor upsample_nearest3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14725 return at::_ops::upsample_nearest3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14726 }
14727}
14728
14729// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14730inline at::Tensor upsample_nearest3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14731 return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
14732}
14733namespace symint {
14734 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14735 at::Tensor upsample_nearest3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14736 return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
14737 }
14738}
14739
14740// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14741inline at::Tensor _upsample_nearest_exact3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14742 return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14743}
14744namespace symint {
14745 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14746 at::Tensor _upsample_nearest_exact3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14747 return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, scale_factors);
14748 }
14749}
14750
14751// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
14752inline at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14753 return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
14754}
14755namespace symint {
14756 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14757 at::Tensor _upsample_nearest_exact3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
14758 return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
14759 }
14760}
14761
14762// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
14763inline at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14764 return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
14765}
14766namespace symint {
14767 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14768 at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14769 return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
14770 }
14771}
14772
14773// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
14774inline at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
14775 return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
14776}
14777namespace symint {
14778 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14779 at::Tensor & upsample_linear1d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
14780 return at::_ops::upsample_linear1d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales, out);
14781 }
14782}
14783
14784// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
14785inline at::Tensor & upsample_linear1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14786 return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
14787}
14788namespace symint {
14789 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14790 at::Tensor & upsample_linear1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14791 return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
14792 }
14793}
14794
14795// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
14796inline at::Tensor & upsample_linear1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
14797 return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
14798}
14799namespace symint {
14800 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14801 at::Tensor & upsample_linear1d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
14802 return at::_ops::upsample_linear1d_out::call(self, output_size, align_corners, scales, out);
14803 }
14804}
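
// Editorial sketch: the `.out` overloads write into a caller-owned tensor,
// avoiding a fresh allocation in hot loops; if the given shape does not
// match, the kernel may resize `out`.
//
//   at::Tensor x = at::rand({1, 1, 8});
//   at::Tensor out = at::empty({1, 1, 16});
//   at::upsample_linear1d_out(out, x, {16}, /*align_corners=*/false);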
14805
14806// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
14807inline at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14808 return at::_ops::upsample_linear1d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales);
14809}
14810namespace symint {
14811 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14812 at::Tensor upsample_linear1d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14813 return at::_ops::upsample_linear1d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales);
14814 }
14815}
14816
14817// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
14818inline at::Tensor upsample_linear1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14819 return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
14820}
14821namespace symint {
14822 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14823 at::Tensor upsample_linear1d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14824 return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
14825 }
14826}
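
// Editorial sketch: the `_symint` spelling takes c10::SymIntArrayRef
// directly, for callers that already hold symbolic sizes; with concrete
// values the two entry points behave identically.
//
//   at::Tensor x = at::rand({1, 1, 8});
//   std::vector<c10::SymInt> out_size = {c10::SymInt(16)};
//   at::Tensor y = at::upsample_linear1d_symint(x, out_size,
//                                               /*align_corners=*/false);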
14827
14828// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14829inline at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14830 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
14831}
14832namespace symint {
14833 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14834 at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14835 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
14836 }
14837}
14838
14839// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14840inline at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
14841 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
14842}
14843namespace symint {
14844 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14845 at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
14846 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
14847 }
14848}
14849
14850// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14851inline at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14852 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
14853}
14854namespace symint {
14855 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14856 at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14857 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
14858 }
14859}
14860
14861// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14862inline at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
14863 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
14864}
14865namespace symint {
14866 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14867 at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
14868 return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
14869 }
14870}
14871
14872// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
14873inline at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14874 return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
14875}
14876namespace symint {
14877 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14878 at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14879 return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
14880 }
14881}
14882
14883// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
14884inline at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14885 return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
14886}
14887namespace symint {
14888 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14889 at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
14890 return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
14891 }
14892}
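
// Editorial sketch: the backward op takes both the forward `output_size` and
// the full `input_size` (N, C, L in 1-D) so it can rebuild the gradient shape.
//
//   at::Tensor grad_out = at::rand({1, 1, 16});
//   at::Tensor grad_in = at::upsample_linear1d_backward(
//       grad_out, /*output_size=*/{16}, /*input_size=*/{1, 1, 8},
//       /*align_corners=*/false);
//   // grad_in.sizes() == {1, 1, 8}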
14893
14894// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
14895inline at::Tensor & upsample_bilinear2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14896 return at::_ops::upsample_bilinear2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
14897}
14898namespace symint {
14899 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14900 at::Tensor & upsample_bilinear2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14901 return at::_ops::upsample_bilinear2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
14902 }
14903}
14904
14905// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
14906inline at::Tensor & upsample_bilinear2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
14907 return at::_ops::upsample_bilinear2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
14908}
14909namespace symint {
14910 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14911 at::Tensor & upsample_bilinear2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
14912 return at::_ops::upsample_bilinear2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
14913 }
14914}
14915
14916// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
14917inline at::Tensor & upsample_bilinear2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14918 return at::_ops::upsample_bilinear2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
14919}
14920namespace symint {
14921 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14922 at::Tensor & upsample_bilinear2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14923 return at::_ops::upsample_bilinear2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
14924 }
14925}
14926
14927// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
14928inline at::Tensor & upsample_bilinear2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
14929 return at::_ops::upsample_bilinear2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
14930}
14931namespace symint {
14932 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14933 at::Tensor & upsample_bilinear2d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
14934 return at::_ops::upsample_bilinear2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
14935 }
14936}
14937
14938// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
14939inline at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14940 return at::_ops::upsample_bilinear2d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
14941}
14942namespace symint {
14943 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14944 at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14945 return at::_ops::upsample_bilinear2d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
14946 }
14947}
14948
14949// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
14950inline at::Tensor upsample_bilinear2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14951 return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
14952}
14953namespace symint {
14954 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14955 at::Tensor upsample_bilinear2d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14956 return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
14957 }
14958}
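
// Editorial sketch: the 2-D overloads split the optional scale into
// `scales_h` and `scales_w`; when supplied they steer the kernel's
// coordinate mapping, while the output shape itself comes from `output_size`.
//
//   at::Tensor img = at::rand({1, 3, 32, 32});
//   at::Tensor up = at::upsample_bilinear2d(img, {64, 64},
//                                           /*align_corners=*/false,
//                                           /*scales_h=*/2.0, /*scales_w=*/2.0);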
14959
14960// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14961inline at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14962 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
14963}
14964namespace symint {
14965 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14966 at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14967 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
14968 }
14969}
14970
14971// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14972inline at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
14973 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
14974}
14975namespace symint {
14976 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
14977 at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
14978 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
14979 }
14980}
14981
14982// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14983inline at::Tensor & upsample_bilinear2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14984 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
14985}
14986namespace symint {
14987 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14988 at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
14989 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
14990 }
14991}
14992
14993// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
14994inline at::Tensor & upsample_bilinear2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
14995 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
14996}
14997namespace symint {
14998 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
14999 at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15000 return at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15001 }
15002}
15003
15004// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15005inline at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15006 return at::_ops::upsample_bilinear2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15007}
15008namespace symint {
15009 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15010 at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15011 return at::_ops::upsample_bilinear2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15012 }
15013}
15014
15015// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15016inline at::Tensor upsample_bilinear2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15017 return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15018}
15019namespace symint {
15020 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15021 at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15022 return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15023 }
15024}
15025
15026// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15027inline at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15028 return at::_ops::_upsample_bilinear2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15029}
15030namespace symint {
15031 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15032 at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15033 return at::_ops::_upsample_bilinear2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15034 }
15035}
15036
15037// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15038inline at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15039 return at::_ops::_upsample_bilinear2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15040}
15041namespace symint {
15042 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15043 at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15044 return at::_ops::_upsample_bilinear2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15045 }
15046}
15047
15048// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15049inline at::Tensor & _upsample_bilinear2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15050 return at::_ops::_upsample_bilinear2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15051}
15052namespace symint {
15053 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15054 at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15055 return at::_ops::_upsample_bilinear2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15056 }
15057}
15058
15059// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15060inline at::Tensor & _upsample_bilinear2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15061 return at::_ops::_upsample_bilinear2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15062}
15063namespace symint {
15064 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15065 at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15066 return at::_ops::_upsample_bilinear2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15067 }
15068}
15069
15070// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15071inline at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15072 return at::_ops::_upsample_bilinear2d_aa::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
15073}
15074namespace symint {
15075 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15076 at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15077 return at::_ops::_upsample_bilinear2d_aa::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
15078 }
15079}
15080
15081// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15082inline at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15083 return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
15084}
15085namespace symint {
15086 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15087 at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15088 return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
15089 }
15090}
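
// A usage sketch for the functional form (input shape and sizes here are
// illustrative assumptions, not requirements):
//
//   at::Tensor x = at::rand({1, 3, 32, 32});    // NCHW input
//   at::Tensor y = at::_upsample_bilinear2d_aa(
//       x, {64, 64}, /*align_corners=*/false);  // scales_h/scales_w default to nullopt
//
// This anti-aliased kernel is what `torch.nn.functional.interpolate(...,
// mode="bilinear", antialias=True)` is expected to dispatch to.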
15091
15092// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15093inline at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15094 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15095}
15096namespace symint {
15097 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15098 at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15099 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15100 }
15101}
15102
15103// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15104inline at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15105 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15106}
15107namespace symint {
15108 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15109 at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15110 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15111 }
15112}
15113
15114// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15115inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15116 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15117}
15118namespace symint {
15119 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15120 at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15121 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15122 }
15123}
15124
15125// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15126inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15127 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15128}
15129namespace symint {
15130 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15131 at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15132 return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15133 }
15134}
15135
15136// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15137inline at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15138 return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15139}
15140namespace symint {
15141 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15142 at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15143 return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15144 }
15145}
15146
15147// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15148inline at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15149 return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15150}
15151namespace symint {
15152 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15153 at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15154 return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15155 }
15156}
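
// Backward sketch (shapes illustrative): per the schema above, `grad_output`
// carries the *output* spatial size, while `input_size` restates the full
// input shape including batch and channel dimensions:
//
//   at::Tensor grad_out = at::ones({1, 3, 64, 64});
//   at::Tensor grad_in  = at::_upsample_bilinear2d_aa_backward(
//       grad_out, /*output_size=*/{64, 64}, /*input_size=*/{1, 3, 32, 32},
//       /*align_corners=*/false);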
15157
15158// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15159inline at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15160 return at::_ops::upsample_bicubic2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15161}
15162namespace symint {
15163 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15164 at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15165 return at::_ops::upsample_bicubic2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15166 }
15167}
15168
15169// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15170inline at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15171 return at::_ops::upsample_bicubic2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15172}
15173namespace symint {
15174 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15175 at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15176 return at::_ops::upsample_bicubic2d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15177 }
15178}
15179
15180// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15181inline at::Tensor & upsample_bicubic2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15182 return at::_ops::upsample_bicubic2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15183}
15184namespace symint {
15185 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15186 at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15187 return at::_ops::upsample_bicubic2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15188 }
15189}
15190
15191// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15192inline at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15193 return at::_ops::upsample_bicubic2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15194}
15195namespace symint {
15196 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15197 at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15198 return at::_ops::upsample_bicubic2d_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15199 }
15200}
15201
15202// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15203inline at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15204 return at::_ops::upsample_bicubic2d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
15205}
15206namespace symint {
15207 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15208 at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15209 return at::_ops::upsample_bicubic2d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
15210 }
15211}
15212
15213// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15214inline at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15215 return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
15216}
15217namespace symint {
15218 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15219 at::Tensor upsample_bicubic2d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15220 return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
15221 }
15222}
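
// Sketch of the bicubic variant (illustrative shapes; the explicit scale
// hints are optional doubles and may be omitted):
//
//   at::Tensor x  = at::rand({2, 3, 16, 16});
//   at::Tensor y  = at::upsample_bicubic2d(x, {32, 32}, /*align_corners=*/true);
//   at::Tensor y2 = at::upsample_bicubic2d(x, {32, 32}, true, 2.0, 2.0);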
15223
15224// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15225inline at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15226 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15227}
15228namespace symint {
15229 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15230 at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15231 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15232 }
15233}
15234
15235// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15236inline at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15237 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15238}
15239namespace symint {
15240 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15241 at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15242 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15243 }
15244}
15245
15246// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15247inline at::Tensor & upsample_bicubic2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15248 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15249}
15250namespace symint {
15251 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15252 at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15253 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15254 }
15255}
15256
15257// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15258inline at::Tensor & upsample_bicubic2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15259 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15260}
15261namespace symint {
15262 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15263 at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15264 return at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15265 }
15266}
15267
15268// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15269inline at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15270 return at::_ops::upsample_bicubic2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15271}
15272namespace symint {
15273 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15274 at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15275 return at::_ops::upsample_bicubic2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15276 }
15277}
15278
15279// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15280inline at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15281 return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15282}
15283namespace symint {
15284 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15285 at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15286 return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15287 }
15288}
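
// Writing the gradient into a preallocated buffer via the `_outf` form,
// which takes every optional explicitly (a sketch with assumed shapes):
//
//   at::Tensor grad_out = at::ones({2, 3, 32, 32});
//   at::Tensor grad_in  = at::empty({2, 3, 16, 16});
//   at::upsample_bicubic2d_backward_outf(
//       grad_out, {32, 32}, {2, 3, 16, 16}, /*align_corners=*/true,
//       c10::nullopt, c10::nullopt, grad_in);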
15289
15290// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15291inline at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15292 return at::_ops::_upsample_bicubic2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15293}
15294namespace symint {
15295 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15296 at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15297 return at::_ops::_upsample_bicubic2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15298 }
15299}
15300
15301// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15302inline at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15303 return at::_ops::_upsample_bicubic2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15304}
15305namespace symint {
15306 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15307 at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15308 return at::_ops::_upsample_bicubic2d_aa_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w, out);
15309 }
15310}
15311
15312// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15313inline at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15314 return at::_ops::_upsample_bicubic2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15315}
15316namespace symint {
15317 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15318 at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15319 return at::_ops::_upsample_bicubic2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15320 }
15321}
15322
15323// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15324inline at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15325 return at::_ops::_upsample_bicubic2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15326}
15327namespace symint {
15328 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15329 at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15330 return at::_ops::_upsample_bicubic2d_aa_out::call(self, output_size, align_corners, scales_h, scales_w, out);
15331 }
15332}
15333
15334// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15335inline at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15336 return at::_ops::_upsample_bicubic2d_aa::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
15337}
15338namespace symint {
15339 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15340 at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15341 return at::_ops::_upsample_bicubic2d_aa::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_h, scales_w);
15342 }
15343}
15344
15345// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15346inline at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15347 return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
15348}
15349namespace symint {
15350 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15351 at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15352 return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
15353 }
15354}
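
// The `_symint` spelling accepts symbolic sizes; concrete values can still
// be passed by wrapping them in `c10::SymInt` (a sketch, names assumed):
//
//   std::vector<c10::SymInt> sz = {c10::SymInt(32), c10::SymInt(32)};
//   at::Tensor x = at::rand({1, 3, 16, 16});
//   at::Tensor y = at::_upsample_bicubic2d_aa_symint(x, sz, /*align_corners=*/false);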
15355
15356// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15357inline at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15358 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15359}
15360namespace symint {
15361 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15362 at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15363 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15364 }
15365}
15366
15367// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15368inline at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15369 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15370}
15371namespace symint {
15372 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15373 at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15374 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input);
15375 }
15376}
15377
15378// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15379inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15380 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15381}
15382namespace symint {
15383 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15384 at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15385 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15386 }
15387}
15388
15389// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15390inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15391 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15392}
15393namespace symint {
15394 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15395 at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15396 return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
15397 }
15398}
15399
15400// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15401inline at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15402 return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15403}
15404namespace symint {
15405 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15406 at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15407 return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w);
15408 }
15409}
15410
15411// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
15412inline at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15413 return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15414}
15415namespace symint {
15416 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15417 at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15418 return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
15419 }
15420}
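
// Backward counterpart, with the same shape contract as the other 2d
// backwards (illustrative values):
//
//   at::Tensor grad_in = at::_upsample_bicubic2d_aa_backward(
//       at::ones({1, 3, 32, 32}), /*output_size=*/{32, 32},
//       /*input_size=*/{1, 3, 16, 16}, /*align_corners=*/false);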
15421
15422// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15423inline at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15424 return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
15425}
15426namespace symint {
15427 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15428 at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15429 return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
15430 }
15431}
15432
15433// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15434inline at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15435 return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
15436}
15437namespace symint {
15438 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15439 at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15440 return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
15441 }
15442}
15443
15444// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15445inline at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15446 return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
15447}
15448namespace symint {
15449 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15450 at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15451 return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
15452 }
15453}
15454
15455// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15456inline at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15457 return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
15458}
15459namespace symint {
15460 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15461 at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15462 return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
15463 }
15464}
15465
15466// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
15467inline at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15468 return at::_ops::upsample_trilinear3d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w);
15469}
15470namespace symint {
15471 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15472 at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15473 return at::_ops::upsample_trilinear3d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w);
15474 }
15475}
15476
15477// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
15478inline at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15479 return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
15480}
15481namespace symint {
15482 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15483 at::Tensor upsample_trilinear3d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15484 return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
15485 }
15486}
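
// The trilinear operator expects a 5-d NCDHW input and a 3-element
// output_size, with an extra depth scale ahead of the height/width scales
// (sketch, shapes illustrative):
//
//   at::Tensor v = at::rand({1, 2, 4, 8, 8});
//   at::Tensor y = at::upsample_trilinear3d(v, {8, 16, 16}, /*align_corners=*/false);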
15487
15488// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15489inline at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15490 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
15491}
15492namespace symint {
15493 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15494 at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15495 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
15496 }
15497}
15498
15499// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15500inline at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15501 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
15502}
15503namespace symint {
15504 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15505 at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15506 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
15507 }
15508}
15509
15510// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15511inline at::Tensor & upsample_trilinear3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15512 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
15513}
15514namespace symint {
15515 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15516 at::Tensor & upsample_trilinear3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15517 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
15518 }
15519}
15520
15521// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15522inline at::Tensor & upsample_trilinear3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15523 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
15524}
15525namespace symint {
15526 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15527 at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15528 return at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
15529 }
15530}
15531
15532// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
15533inline at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15534 return at::_ops::upsample_trilinear3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w);
15535}
15536namespace symint {
15537 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15538 at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15539 return at::_ops::upsample_trilinear3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_d, scales_h, scales_w);
15540 }
15541}
15542
15543// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
15544inline at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15545 return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
15546}
15547namespace symint {
15548 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15549 at::Tensor upsample_trilinear3d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15550 return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
15551 }
15552}
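
// Matching backward sketch; note `input_size` is now the 5-element NCDHW
// shape, per the SymInt[5] schema above:
//
//   at::Tensor grad_in = at::upsample_trilinear3d_backward(
//       at::ones({1, 2, 8, 16, 16}), /*output_size=*/{8, 16, 16},
//       /*input_size=*/{1, 2, 4, 8, 8}, /*align_corners=*/false);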
15553
15554// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15555inline at::Tensor & upsample_nearest1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15556 return at::_ops::upsample_nearest1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15557}
15558namespace symint {
15559 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15560 at::Tensor & upsample_nearest1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15561 return at::_ops::upsample_nearest1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15562 }
15563}
15564
15565// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15566inline at::Tensor & upsample_nearest1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15567 return at::_ops::upsample_nearest1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15568}
15569namespace symint {
15570 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15571 at::Tensor & upsample_nearest1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15572 return at::_ops::upsample_nearest1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15573 }
15574}
15575
15576// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15577inline at::Tensor & upsample_nearest1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15578 return at::_ops::upsample_nearest1d_out::call(self, output_size, scales, out);
15579}
15580namespace symint {
15581 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15582 at::Tensor & upsample_nearest1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15583 return at::_ops::upsample_nearest1d_out::call(self, output_size, scales, out);
15584 }
15585}
15586
15587// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15588inline at::Tensor & upsample_nearest1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15589 return at::_ops::upsample_nearest1d_out::call(self, output_size, scales, out);
15590}
15591namespace symint {
15592 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15593 at::Tensor & upsample_nearest1d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15594 return at::_ops::upsample_nearest1d_out::call(self, output_size, scales, out);
15595 }
15596}
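
// The 1d nearest-neighbor out-variants operate on NCW inputs; the `_out`
// and `_outf` spellings differ only in argument order and defaulting
// (sketch with assumed shapes):
//
//   at::Tensor x   = at::rand({1, 4, 50});
//   at::Tensor out = at::empty({1, 4, 100});
//   at::upsample_nearest1d_out(out, x, {100});                  // scales defaulted
//   at::upsample_nearest1d_outf(x, {100}, c10::nullopt, out);   // fully explicit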
15597
15598// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15599inline at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15600 return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15601}
15602namespace symint {
15603 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15604 at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15605 return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15606 }
15607}
15608
15609// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15610inline at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15611 return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15612}
15613namespace symint {
15614 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15615 at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15616 return at::_ops::_upsample_nearest_exact1d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales, out);
15617 }
15618}
15619
15620// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15621inline at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15622 return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
15623}
15624namespace symint {
15625 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15626 at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15627 return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
15628 }
15629}
15630
15631// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
15632inline at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15633 return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
15634}
15635namespace symint {
15636 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15637 at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
15638 return at::_ops::_upsample_nearest_exact1d_out::call(self, output_size, scales, out);
15639 }
15640}
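
// `_upsample_nearest_exact1d` mirrors the same out/outf/symint pattern but
// rounds source coordinates differently; it is expected to back
// `interpolate(mode="nearest-exact")` in the Python API. Sketch:
//
//   at::Tensor out = at::empty({1, 4, 100});
//   at::_upsample_nearest_exact1d_out(out, at::rand({1, 4, 50}), {100});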
15641
15642// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
15643inline at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15644 return at::_ops::upsample_nearest1d::call(self, c10::fromIntArrayRefSlow(output_size), scales);
15645}
15646namespace symint {
15647 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15648 at::Tensor upsample_nearest1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15649 return at::_ops::upsample_nearest1d::call(self, c10::fromIntArrayRefSlow(output_size), scales);
15650 }
15651}
15652
15653// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
15654inline at::Tensor upsample_nearest1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15655 return at::_ops::upsample_nearest1d::call(self, output_size, scales);
15656}
15657namespace symint {
15658 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15659 at::Tensor upsample_nearest1d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15660 return at::_ops::upsample_nearest1d::call(self, output_size, scales);
15661 }
15662}
15663
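// Sketch (illustrative): the functional form allocates and returns a fresh
// tensor; at::tensor comes from <ATen/ops/tensor.h>, included near the top of
// this file.
inline at::Tensor example_nearest1d() {
  at::Tensor self = at::tensor({1.f, 2.f, 3.f, 4.f}).reshape({1, 1, 4});
  return at::upsample_nearest1d(self, {8});  // (1, 1, 8); each element repeated twice
}
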
15664// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
15665inline at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15666 return at::_ops::_upsample_nearest_exact1d::call(self, c10::fromIntArrayRefSlow(output_size), scales);
15667}
15668namespace symint {
15669 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15670 at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15671 return at::_ops::_upsample_nearest_exact1d::call(self, c10::fromIntArrayRefSlow(output_size), scales);
15672 }
15673}
15674
15675// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
15676inline at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15677 return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
15678}
15679namespace symint {
15680 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15681 at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
15682 return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
15683 }
15684}
15685
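// Sketch (illustrative, hypothetical helper): the at::symint:: overloads let
// generic code choose the int64_t or c10::SymInt flavour with a template
// argument instead of a differently named function.
template <typename T>  // T = int64_t or c10::SymInt
at::Tensor example_generic_exact1d(const at::Tensor & self, c10::ArrayRef<T> output_size) {
  return at::symint::_upsample_nearest_exact1d<T>(self, output_size);
}
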
15686// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15687inline at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15688 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15689}
15690namespace symint {
15691 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15692 at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15693 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15694 }
15695}
15696
15697// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15698inline at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15699 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15700}
15701namespace symint {
15702 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15703 at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15704 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15705 }
15706}
15707
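// Sketch (illustrative): backward kernels reduce grad_output back to the
// forward input's shape; note input_size is the full (N, C, W) shape, not
// just the spatial part.
inline at::Tensor & example_nearest1d_backward_out(const at::Tensor & grad_output,
                                                   at::Tensor & grad_input) {
  return at::upsample_nearest1d_backward_out(grad_input, grad_output,
                                             /*output_size=*/{8},
                                             /*input_size=*/{1, 1, 4});
}
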
15708// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15709inline at::Tensor & upsample_nearest1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15710 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15711}
15712namespace symint {
15713 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15714 at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15715 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15716 }
15717}
15718
15719// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15720inline at::Tensor & upsample_nearest1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15721 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15722}
15723namespace symint {
15724 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15725 at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15726 return at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15727 }
15728}
15729
15730// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15731inline at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15732 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15733}
15734namespace symint {
15735 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15736 at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15737 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15738 }
15739}
15740
15741// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15742inline at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15743 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15744}
15745namespace symint {
15746 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15747 at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15748 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input);
15749 }
15750}
15751
15752// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15753inline at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15754 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15755}
15756namespace symint {
15757 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15758 at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15759 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15760 }
15761}
15762
15763// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15764inline at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15765 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15766}
15767namespace symint {
15768 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15769 at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
15770 return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input);
15771 }
15772}
15773
15774// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
15775inline at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15776 return at::_ops::upsample_nearest1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales);
15777}
15778namespace symint {
15779 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15780 at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15781 return at::_ops::upsample_nearest1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales);
15782 }
15783}
15784
15785// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
15786inline at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15787 return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
15788}
15789namespace symint {
15790 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15791 at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15792 return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
15793 }
15794}
15795
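// Sketch (illustrative): the functional backward allocates grad_input itself;
// with the sizes below a (1, 1, 8) grad_output is reduced to a (1, 1, 4)
// grad_input.
inline at::Tensor example_nearest1d_backward(const at::Tensor & grad_output) {
  return at::upsample_nearest1d_backward(grad_output, {8}, {1, 1, 4});
}
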
15796// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
15797inline at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15798 return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales);
15799}
15800namespace symint {
15801 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15802 at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15803 return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales);
15804 }
15805}
15806
15807// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
15808inline at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15809 return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
15810}
15811namespace symint {
15812 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15813 at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
15814 return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
15815 }
15816}
15817
15818// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15819inline at::Tensor & upsample_nearest2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15820 return at::_ops::upsample_nearest2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15821}
15822namespace symint {
15823 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15824 at::Tensor & upsample_nearest2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15825 return at::_ops::upsample_nearest2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15826 }
15827}
15828
15829// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15830inline at::Tensor & upsample_nearest2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15831 return at::_ops::upsample_nearest2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15832}
15833namespace symint {
15834 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15835 at::Tensor & upsample_nearest2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15836 return at::_ops::upsample_nearest2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15837 }
15838}
15839
15840// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15841inline at::Tensor & upsample_nearest2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15842 return at::_ops::upsample_nearest2d_out::call(self, output_size, scales_h, scales_w, out);
15843}
15844namespace symint {
15845 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15846 at::Tensor & upsample_nearest2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15847 return at::_ops::upsample_nearest2d_out::call(self, output_size, scales_h, scales_w, out);
15848 }
15849}
15850
15851// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15852inline at::Tensor & upsample_nearest2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15853 return at::_ops::upsample_nearest2d_out::call(self, output_size, scales_h, scales_w, out);
15854}
15855namespace symint {
15856 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15857 at::Tensor & upsample_nearest2d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15858 return at::_ops::upsample_nearest2d_out::call(self, output_size, scales_h, scales_w, out);
15859 }
15860}
15861
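// Sketch (illustrative): the 2-D variant takes separate scales_h / scales_w
// hints; passing c10::nullopt instead lets the kernel derive the scale from
// the two sizes.
inline at::Tensor & example_nearest2d_out(const at::Tensor & self, at::Tensor & out) {
  return at::upsample_nearest2d_out(out, self, {8, 8},
                                    /*scales_h=*/2.0, /*scales_w=*/2.0);
}
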
15862// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15863inline at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15864 return at::_ops::_upsample_nearest_exact2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15865}
15866namespace symint {
15867 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15868 at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15869 return at::_ops::_upsample_nearest_exact2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15870 }
15871}
15872
15873// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15874inline at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15875 return at::_ops::_upsample_nearest_exact2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15876}
15877namespace symint {
15878 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15879 at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15880 return at::_ops::_upsample_nearest_exact2d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w, out);
15881 }
15882}
15883
15884// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15885inline at::Tensor & _upsample_nearest_exact2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15886 return at::_ops::_upsample_nearest_exact2d_out::call(self, output_size, scales_h, scales_w, out);
15887}
15888namespace symint {
15889 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15890 at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15891 return at::_ops::_upsample_nearest_exact2d_out::call(self, output_size, scales_h, scales_w, out);
15892 }
15893}
15894
15895// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
15896inline at::Tensor & _upsample_nearest_exact2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15897 return at::_ops::_upsample_nearest_exact2d_out::call(self, output_size, scales_h, scales_w, out);
15898}
15899namespace symint {
15900 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15901 at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
15902 return at::_ops::_upsample_nearest_exact2d_out::call(self, output_size, scales_h, scales_w, out);
15903 }
15904}
15905
15906// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
15907inline at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15908 return at::_ops::upsample_nearest2d::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w);
15909}
15910namespace symint {
15911 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15912 at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15913 return at::_ops::upsample_nearest2d::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w);
15914 }
15915}
15916
15917// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
15918inline at::Tensor upsample_nearest2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15919 return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
15920}
15921namespace symint {
15922 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15923 at::Tensor upsample_nearest2d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15924 return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
15925 }
15926}
15927
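// Sketch (illustrative): doubling both spatial dims of an (N, C, H, W) input,
// computing the output size from the input rather than hard-coding it.
inline at::Tensor example_nearest2d(const at::Tensor & self) {
  int64_t H = self.size(2);
  int64_t W = self.size(3);
  return at::upsample_nearest2d(self, {H * 2, W * 2});
}
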
15928// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
15929inline at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15930 return at::_ops::_upsample_nearest_exact2d::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w);
15931}
15932namespace symint {
15933 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15934 at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15935 return at::_ops::_upsample_nearest_exact2d::call(self, c10::fromIntArrayRefSlow(output_size), scales_h, scales_w);
15936 }
15937}
15938
15939// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
15940inline at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15941 return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
15942}
15943namespace symint {
15944 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15945 at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15946 return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
15947 }
15948}
15949
15950// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15951inline at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15952 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
15953}
15954namespace symint {
15955 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15956 at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15957 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
15958 }
15959}
15960
15961// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15962inline at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15963 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
15964}
15965namespace symint {
15966 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
15967 at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15968 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
15969 }
15970}
15971
15972// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15973inline at::Tensor & upsample_nearest2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15974 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
15975}
15976namespace symint {
15977 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15978 at::Tensor & upsample_nearest2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15979 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
15980 }
15981}
15982
15983// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15984inline at::Tensor & upsample_nearest2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15985 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
15986}
15987namespace symint {
15988 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
15989 at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
15990 return at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
15991 }
15992}
15993
15994// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
15995inline at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
15996 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
15997}
15998namespace symint {
15999 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16000 at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16001 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
16002 }
16003}
16004
16005// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16006inline at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16007 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
16008}
16009namespace symint {
16010 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16011 at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16012 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w, grad_input);
16013 }
16014}
16015
16016// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16017inline at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16018 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
16019}
16020namespace symint {
16021 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16022 at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16023 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
16024 }
16025}
16026
16027// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16028inline at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16029 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
16030}
16031namespace symint {
16032 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16033 at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16034 return at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output, output_size, input_size, scales_h, scales_w, grad_input);
16035 }
16036}
16037
16038// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
16039inline at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16040 return at::_ops::upsample_nearest2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w);
16041}
16042namespace symint {
16043 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16044 at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16045 return at::_ops::upsample_nearest2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w);
16046 }
16047}
16048
16049// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
16050inline at::Tensor upsample_nearest2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16051 return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
16052}
16053namespace symint {
16054 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16055 at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16056 return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
16057 }
16058}
16059
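// Sketch (illustrative): as in the 1-D case, input_size is the full
// (N, C, H, W) shape of the forward input that the gradient is reduced to.
inline at::Tensor example_nearest2d_backward(const at::Tensor & grad_output) {
  return at::upsample_nearest2d_backward(grad_output, /*output_size=*/{8, 8},
                                         /*input_size=*/{1, 3, 4, 4});
}
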
16060// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
16061inline at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16062 return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w);
16063}
16064namespace symint {
16065 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16066 at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16067 return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_h, scales_w);
16068 }
16069}
16070
16071// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
16072inline at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16073 return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
16074}
16075namespace symint {
16076 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16077 at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16078 return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
16079 }
16080}
16081
16082// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16083inline at::Tensor & upsample_nearest3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16084 return at::_ops::upsample_nearest3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16085}
16086namespace symint {
16087 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16088 at::Tensor & upsample_nearest3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16089 return at::_ops::upsample_nearest3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16090 }
16091}
16092
16093// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16094inline at::Tensor & upsample_nearest3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16095 return at::_ops::upsample_nearest3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16096}
16097namespace symint {
16098 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16099 at::Tensor & upsample_nearest3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16100 return at::_ops::upsample_nearest3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16101 }
16102}
16103
16104// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16105inline at::Tensor & upsample_nearest3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16106 return at::_ops::upsample_nearest3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16107}
16108namespace symint {
16109 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16110 at::Tensor & upsample_nearest3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16111 return at::_ops::upsample_nearest3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16112 }
16113}
16114
16115// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16116inline at::Tensor & upsample_nearest3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16117 return at::_ops::upsample_nearest3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16118}
16119namespace symint {
16120 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16121 at::Tensor & upsample_nearest3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16122 return at::_ops::upsample_nearest3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16123 }
16124}
16125
16126// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16127inline at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16128 return at::_ops::_upsample_nearest_exact3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16129}
16130namespace symint {
16131 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16132 at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16133 return at::_ops::_upsample_nearest_exact3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16134 }
16135}
16136
16137// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16138inline at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16139 return at::_ops::_upsample_nearest_exact3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16140}
16141namespace symint {
16142 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16143 at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16144 return at::_ops::_upsample_nearest_exact3d_out::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w, out);
16145 }
16146}
16147
16148// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16149inline at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16150 return at::_ops::_upsample_nearest_exact3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16151}
16152namespace symint {
16153 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16154 at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16155 return at::_ops::_upsample_nearest_exact3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16156 }
16157}
16158
16159// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
16160inline at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16161 return at::_ops::_upsample_nearest_exact3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16162}
16163namespace symint {
16164 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16165 at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
16166 return at::_ops::_upsample_nearest_exact3d_out::call(self, output_size, scales_d, scales_h, scales_w, out);
16167 }
16168}
16169
16170// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16171inline at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16172 return at::_ops::upsample_nearest3d::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w);
16173}
16174namespace symint {
16175 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16176 at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16177 return at::_ops::upsample_nearest3d::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w);
16178 }
16179}
16180
16181// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16182inline at::Tensor upsample_nearest3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16183 return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
16184}
16185namespace symint {
16186 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16187 at::Tensor upsample_nearest3d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16188 return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
16189 }
16190}
16191
16192// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16193inline at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16194 return at::_ops::_upsample_nearest_exact3d::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w);
16195}
16196namespace symint {
16197 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16198 at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16199 return at::_ops::_upsample_nearest_exact3d::call(self, c10::fromIntArrayRefSlow(output_size), scales_d, scales_h, scales_w);
16200 }
16201}
16202
16203// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16204inline at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16205 return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
16206}
16207namespace symint {
16208 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16209 at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16210 return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
16211 }
16212}
16213
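// Sketch (illustrative): the 3-D variant operates on (N, C, D, H, W) inputs
// and takes the three spatial target sizes plus optional scales_d / scales_h /
// scales_w.
inline at::Tensor example_exact3d(const at::Tensor & self) {
  return at::_upsample_nearest_exact3d(self, {4, 8, 8});
}
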
16214// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16215inline at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16216 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16217}
16218namespace symint {
16219 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16220 at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16221 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16222 }
16223}
16224
16225// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16226inline at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16227 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16228}
16229namespace symint {
16230 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16231 at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16232 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16233 }
16234}
16235
16236// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16237inline at::Tensor & upsample_nearest3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16238 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16239}
16240namespace symint {
16241 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16242 at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16243 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16244 }
16245}
16246
16247// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16248inline at::Tensor & upsample_nearest3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16249 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16250}
16251namespace symint {
16252 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16253 at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16254 return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16255 }
16256}
16257
16258// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16259inline at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16260 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16261}
16262namespace symint {
16263 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16264 at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16265 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16266 }
16267}
16268
16269// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16270inline at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16271 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16272}
16273namespace symint {
16274 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16275 at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16276 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
16277 }
16278}
16279
16280// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16281inline at::Tensor & _upsample_nearest_exact3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16282 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16283}
16284namespace symint {
16285 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16286 at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16287 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16288 }
16289}
16290
16291// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16292inline at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16293 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16294}
16295namespace symint {
16296 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16297 at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
16298 return at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
16299 }
16300}
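// NOTE: [Wrapper naming in this header]
// Each `.out` schema above is exposed twice: an `_out` wrapper that takes the
// output tensor first and keeps the schema's defaults, and an `_outf` wrapper
// that takes every argument explicitly, in schema order, with the output
// tensor last. `_symint` variants accept c10::SymIntArrayRef sizes, and the
// `at::symint::` namespace selects between the two via a template parameter
// (T = int64_t or T = c10::SymInt). A minimal sketch; the tensors and sizes
// below are made up for illustration:
//
//   at::Tensor grad_out = at::randn({1, 1, 8, 8, 8});
//   std::vector<int64_t> out_sz = {8, 8, 8};       // SymInt[3] in the schema
//   std::vector<int64_t> in_sz  = {1, 1, 4, 4, 4}; // SymInt[5] in the schema
//   at::Tensor gi = at::empty(in_sz, grad_out.options());
//   at::_upsample_nearest_exact3d_backward_out(gi, grad_out, out_sz, in_sz);
//   at::_upsample_nearest_exact3d_backward_outf(
//       grad_out, out_sz, in_sz, c10::nullopt, c10::nullopt, c10::nullopt, gi);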
16301
16302// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16303inline at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16304 return at::_ops::upsample_nearest3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
16305}
16306namespace symint {
16307 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16308 at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16309 return at::_ops::upsample_nearest3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
16310 }
16311}
16312
16313// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16314inline at::Tensor upsample_nearest3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16315 return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
16316}
16317namespace symint {
16318 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16319 at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16320 return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
16321 }
16322}
16323
16324// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16325inline at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16326 return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
16327}
16328namespace symint {
16329 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16330 at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16331 return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
16332 }
16333}
16334
16335// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
16336inline at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16337 return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
16338}
16339namespace symint {
16340 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16341 at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
16342 return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
16343 }
16344}
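// A functional-variant sketch for the same operator, including the symbolic
// shape path (sizes here are illustrative; plain integers convert to
// c10::SymInt implicitly):
//
//   at::Tensor g1 = at::_upsample_nearest_exact3d_backward(grad_out, out_sz, in_sz);
//   std::vector<c10::SymInt> out_szs = {8, 8, 8};
//   std::vector<c10::SymInt> in_szs  = {1, 1, 4, 4, 4};
//   at::Tensor g2 = at::_upsample_nearest_exact3d_backward_symint(grad_out, out_szs, in_szs);
//   // or via the template helper defined above:
//   at::Tensor g3 = at::symint::_upsample_nearest_exact3d_backward<c10::SymInt>(
//       grad_out, out_szs, in_szs);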
16345
16346// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
16347inline at::Tensor & sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
16348 return at::_ops::sigmoid_backward_grad_input::call(grad_output, output, grad_input);
16349}
16350// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
16351inline at::Tensor & sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
16352 return at::_ops::sigmoid_backward_grad_input::call(grad_output, output, grad_input);
16353}
16354
16355// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
16356inline at::Tensor sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) {
16357 return at::_ops::sigmoid_backward::call(grad_output, output);
16358}
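// sigmoid_backward computes grad_output * output * (1 - output), where
// `output` is the saved result of the forward sigmoid. A minimal sketch
// (tensor values are illustrative):
//
//   at::Tensor x  = at::randn({2, 3});
//   at::Tensor y  = at::sigmoid(x);
//   at::Tensor gi = at::sigmoid_backward(at::ones_like(y), y);
//   // or into a preallocated buffer:
//   at::Tensor buf = at::empty_like(y);
//   at::sigmoid_backward_out(buf, at::ones_like(y), y);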
16359
16360// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16361inline at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
16362 return at::_ops::logit_backward_grad_input::call(grad_output, self, eps, grad_input);
16363}
16364// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
16365inline at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input) {
16366 return at::_ops::logit_backward_grad_input::call(grad_output, self, eps, grad_input);
16367}
16368
16369// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
16370inline at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
16371 return at::_ops::logit_backward::call(grad_output, self, eps);
16372}
16373
16374// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
16375inline at::Tensor & tanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
16376 return at::_ops::tanh_backward_grad_input::call(grad_output, output, grad_input);
16377}
16378// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
16379inline at::Tensor & tanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
16380 return at::_ops::tanh_backward_grad_input::call(grad_output, output, grad_input);
16381}
16382
16383// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
16384inline at::Tensor tanh_backward(const at::Tensor & grad_output, const at::Tensor & output) {
16385 return at::_ops::tanh_backward::call(grad_output, output);
16386}
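// Analogously, tanh_backward computes grad_output * (1 - output^2) with
// `output` the saved forward tanh. A minimal sketch:
//
//   at::Tensor y  = at::tanh(at::randn({2, 3}));
//   at::Tensor gi = at::tanh_backward(at::ones_like(y), y);
//   // gi == 1 - y * y elementwise here, since grad_output is all ones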
16387
16388// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16389inline at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16390 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16391}
16392namespace symint {
16393 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16394 at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16395 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16396 }
16397}
16398
16399// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16400inline at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16401 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16402}
16403namespace symint {
16404 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16405 at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16406 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16407 }
16408}
16409
16410// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16411inline at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16412 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16413}
16414namespace symint {
16415 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16416 at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16417 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16418 }
16419}
16420
16421// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16422inline at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16423 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16424}
16425namespace symint {
16426 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16427 at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16428 return at::_ops::slow_conv_transpose2d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16429 }
16430}
16431
16432// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
16433inline at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16434 return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation);
16435}
16436namespace symint {
16437 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16438 at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16439 return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation);
16440 }
16441}
16442
16443// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
16444inline at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16445 return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
16446}
16447namespace symint {
16448 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16449 at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16450 return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
16451 }
16452}
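// A usage sketch for slow_conv_transpose2d. For transposed convolution the
// weight layout is [C_in, C_out, kH, kW], and the output spatial size is
// (H - 1) * stride - 2 * padding + dilation * (kH - 1) + output_padding + 1.
// Shapes below are illustrative:
//
//   at::Tensor x = at::randn({1, 4, 8, 8});
//   at::Tensor w = at::randn({4, 2, 3, 3});
//   at::Tensor y = at::slow_conv_transpose2d(x, w, {3, 3});
//   // (8 - 1) * 1 - 0 + 1 * (3 - 1) + 0 + 1 = 10, so y is [1, 2, 10, 10]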
16453
16454// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16455inline at::Tensor & slow_conv_transpose3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16456 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16457}
16458namespace symint {
16459 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16460 at::Tensor & slow_conv_transpose3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16461 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16462 }
16463}
16464
16465// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16466inline at::Tensor & slow_conv_transpose3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16467 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16468}
16469namespace symint {
16470 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16471 at::Tensor & slow_conv_transpose3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16472 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation, out);
16473 }
16474}
16475
16476// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16477inline at::Tensor & slow_conv_transpose3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16478 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16479}
16480namespace symint {
16481 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16482 at::Tensor & slow_conv_transpose3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16483 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16484 }
16485}
16486
16487// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
16488inline at::Tensor & slow_conv_transpose3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16489 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16490}
16491namespace symint {
16492 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16493 at::Tensor & slow_conv_transpose3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) {
16494 return at::_ops::slow_conv_transpose3d_out::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out);
16495 }
16496}
16497
16498// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
16499inline at::Tensor slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16500 return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation);
16501}
16502namespace symint {
16503 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16504 at::Tensor slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) {
16505 return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), dilation);
16506 }
16507}
16508
16509// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor
16510inline at::Tensor slow_conv_transpose3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16511 return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
16512}
16513namespace symint {
16514 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16515 at::Tensor slow_conv_transpose3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16516 return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
16517 }
16518}
16519
16520// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
16521inline at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
16522 return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
16523}
16524// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
16525inline at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
16526 return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
16527}
16528
16529// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
16530inline at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
16531 return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
16532}
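// thnn_conv2d is a plain (non-grouped, non-dilated) 2-D convolution with
// weight layout [C_out, C_in, kH, kW]. A minimal sketch with illustrative
// shapes:
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor w = at::randn({6, 3, 3, 3});
//   at::Tensor y = at::thnn_conv2d(x, w, {3, 3});  // stride 1, padding 0 -> [1, 6, 6, 6]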
16533
16534// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
16535inline at::Tensor & _slow_conv2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
16536 return at::_ops::_slow_conv2d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
16537}
16538// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
16539inline at::Tensor & _slow_conv2d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
16540 return at::_ops::_slow_conv2d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
16541}
16542
16543// aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
16544inline at::Tensor _slow_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
16545 return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding);
16546}
16547
16548// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
16549inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) {
16550 return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
16551}
16552// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
16553inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) {
16554 return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias);
16555}
16556
16557// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
16558inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
16559 return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
16560}
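// The output_mask variant lets callers skip gradients they do not need; the
// flags correspond to (grad_input, grad_weight, grad_bias) in order. A sketch
// assuming grad_out, x, and w come from a matching forward call:
//
//   auto grads = at::_slow_conv2d_backward(grad_out, x, w,
//       {3, 3}, {1, 1}, {0, 0}, {true, true, false});  // skip grad_bias
//   at::Tensor gx = std::get<0>(grads);
//   at::Tensor gw = std::get<1>(grads);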
16561
16562// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
16563inline const at::Tensor & _conv_depthwise2d_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
16564 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
16565}
16566namespace symint {
16567 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16568 const at::Tensor & _conv_depthwise2d_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
16569 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
16570 }
16571}
16572
16573// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
16574inline const at::Tensor & _conv_depthwise2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) {
16575 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
16576}
16577namespace symint {
16578 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16579 const at::Tensor & _conv_depthwise2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) {
16580 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
16581 }
16582}
16583
16584// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
16585inline const at::Tensor & _conv_depthwise2d_symint_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
16586 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
16587}
16588namespace symint {
16589 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16590 const at::Tensor & _conv_depthwise2d_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
16591 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
16592 }
16593}
16594
16595// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
16596inline const at::Tensor & _conv_depthwise2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) {
16597 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
16598}
16599namespace symint {
16600 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16601 const at::Tensor & _conv_depthwise2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) {
16602 return at::_ops::_conv_depthwise2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
16603 }
16604}
16605
16606// aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
16607inline at::Tensor _conv_depthwise2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
16608 return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16609}
16610namespace symint {
16611 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16612 at::Tensor _conv_depthwise2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
16613 return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16614 }
16615}
16616
16617// aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor
16618inline at::Tensor _conv_depthwise2d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
16619 return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16620}
16621namespace symint {
16622 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16623 at::Tensor _conv_depthwise2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
16624 return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16625 }
16626}
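// _conv_depthwise2d applies one filter per input channel (groups == C), so
// the weight layout is [C, 1, kH, kW]; note that bias, stride, padding, and
// dilation have no defaults here. A minimal sketch:
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor w = at::randn({3, 1, 3, 3});
//   at::Tensor y = at::_conv_depthwise2d(x, w, {3, 3}, {}, {1, 1}, {1, 1}, {1, 1});
//   // padding 1 with a 3x3 kernel preserves the spatial size: y is [1, 3, 8, 8]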
16627
16628// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
16629inline at::Tensor conv_depthwise3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
16630 return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16631}
16632namespace symint {
16633 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16634 at::Tensor conv_depthwise3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
16635 return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16636 }
16637}
16638
16639// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
16640inline at::Tensor conv_depthwise3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
16641 return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16642}
16643namespace symint {
16644 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16645 at::Tensor conv_depthwise3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
16646 return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16647 }
16648}
16649
16650// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
16651inline at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
16652 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
16653}
16654namespace symint {
16655 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16656 at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
16657 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
16658 }
16659}
16660
16661// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
16662inline at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
16663 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
16664}
16665namespace symint {
16666 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16667 at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
16668 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), out);
16669 }
16670}
16671
16672// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
16673inline at::Tensor & slow_conv3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0)) {
16674 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out);
16675}
16676namespace symint {
16677 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16678 at::Tensor & slow_conv3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0)) {
16679 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out);
16680 }
16681}
16682
16683// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
16684inline at::Tensor & slow_conv3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
16685 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out);
16686}
16687namespace symint {
16688 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16689 at::Tensor & slow_conv3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
16690 return at::_ops::slow_conv3d_out::call(self, weight, kernel_size, bias, stride, padding, out);
16691 }
16692}
16693
16694// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
16695inline at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
16696 return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding));
16697}
16698namespace symint {
16699 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16700 at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
16701 return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding));
16702 }
16703}
16704
16705// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
16706inline at::Tensor slow_conv3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0)) {
16707 return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
16708}
16709namespace symint {
16710 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16711 at::Tensor slow_conv3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0)) {
16712 return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
16713 }
16714}
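// slow_conv3d is the 3-D analogue over 5-D [N, C, D, H, W] inputs. A minimal
// sketch with illustrative shapes:
//
//   at::Tensor x = at::randn({1, 2, 8, 8, 8});
//   at::Tensor w = at::randn({4, 2, 3, 3, 3});
//   at::Tensor y = at::slow_conv3d(x, w, {3, 3, 3});  // -> [1, 4, 6, 6, 6]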
16715
16716// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
16717inline at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
16718 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), output);
16719}
16720namespace symint {
16721 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16722 at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
16723 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), output);
16724 }
16725}
16726
16727// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
16728inline at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
16729 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), output);
16730}
16731namespace symint {
16732 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16733 at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) {
16734 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), output);
16735 }
16736}
16737
16738// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
16739inline at::Tensor & slow_conv3d_forward_symint_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
16740 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
16741}
16742namespace symint {
16743 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16744 at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
16745 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
16746 }
16747}
16748
16749// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!)
16750inline at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
16751 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
16752}
16753namespace symint {
16754 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16755 at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
16756 return at::_ops::slow_conv3d_forward_output::call(self, weight, kernel_size, bias, stride, padding, output);
16757 }
16758}
16759
16760// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
16761inline at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
16762 return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding));
16763}
16764namespace symint {
16765 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16766 at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
16767 return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding));
16768 }
16769}
16770
16771// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor
16772inline at::Tensor slow_conv3d_forward_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
16773 return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
16774}
16775namespace symint {
16776 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16777 at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
16778 return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
16779 }
16780}
16781
16782// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
16783inline at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
16784 return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16785}
16786namespace symint {
16787 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16788 at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
16789 return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16790 }
16791}
16792
16793// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor
16794inline at::Tensor slow_conv_dilated2d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16795 return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16796}
16797namespace symint {
16798 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16799 at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16800 return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16801 }
16802}
16803
16804// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
16805inline at::Tensor slow_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
16806 return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16807}
16808namespace symint {
16809 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16810 at::Tensor slow_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
16811 return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation);
16812 }
16813}
16814
16815// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
16816inline at::Tensor slow_conv_dilated3d_symint(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16817 return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16818}
16819namespace symint {
16820 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16821 at::Tensor slow_conv_dilated3d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
16822 return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
16823 }
16824}
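// Dilation spaces the kernel taps apart, giving an effective extent of
// dilation * (k - 1) + 1 per dimension. A sketch where the padding is chosen
// so the spatial size is preserved (shapes illustrative):
//
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   at::Tensor w = at::randn({6, 3, 3, 3});
//   at::Tensor y = at::slow_conv_dilated2d(x, w, {3, 3}, {}, {1, 1}, {2, 2}, {2, 2});
//   // effective extent 2 * (3 - 1) + 1 = 5; (8 + 2*2 - 5) + 1 = 8 -> [1, 6, 8, 8]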
16825
16826// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
16827inline at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16828 return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
16829}
16830namespace symint {
16831 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16832 at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16833 return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
16834 }
16835}
16836
16837// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
16838inline at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
16839 return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
16840}
16841namespace symint {
16842 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16843 at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
16844 return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
16845 }
16846}
16847
16848// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
16849inline at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16850 return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
16851}
16852namespace symint {
16853 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16854 at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16855 return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
16856 }
16857}
16858
16859// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
16860inline at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
16861 return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
16862}
16863namespace symint {
16864 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16865 at::Tensor & col2im_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
16866 return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
16867 }
16868}
16869
16870// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
16871inline at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16872 return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
16873}
16874namespace symint {
16875 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
16876 at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16877 return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
16878 }
16879}
16880
16881// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
16882inline at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16883 return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
16884}
16885namespace symint {
16886 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
16887 at::Tensor col2im(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16888 return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
16889 }
16890}
16891
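// Example (illustrative): the templated overloads in `at::symint` let generic code
// pick the concrete-integer or symbolic-shape entry point explicitly; the enable_if
// on `T` selects between the IntArrayRef and SymIntArrayRef signatures. A minimal
// sketch (tensor shapes here are made up for illustration):
//
//   at::Tensor cols = at::randn({1, 9, 4});  // shaped like an im2col result
//   // T = int64_t resolves to the overload taking at::IntArrayRef output_size:
//   at::Tensor img = at::symint::col2im<int64_t>(
//       cols, /*output_size=*/{4, 4}, /*kernel_size=*/{3, 3},
//       /*dilation=*/{1, 1}, /*padding=*/{1, 1}, /*stride=*/{2, 2});
//   // T = c10::SymInt resolves to the SymIntArrayRef overload, equivalent to
//   // calling at::col2im_symint directly.
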
16892// aten::column_stack(Tensor[] tensors) -> Tensor
16893inline at::Tensor column_stack(at::TensorList tensors) {
16894 return at::_ops::column_stack::call(tensors);
16895}
16896
16897// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
16898inline at::Tensor & column_stack_out(at::Tensor & out, at::TensorList tensors) {
16899 return at::_ops::column_stack_out::call(tensors, out);
16900}
16901// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
16902inline at::Tensor & column_stack_outf(at::TensorList tensors, at::Tensor & out) {
16903 return at::_ops::column_stack_out::call(tensors, out);
16904}
16905
16906// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
16907inline at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16908 return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out);
16909}
16910// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
16911inline at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
16912 return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out);
16913}
16914
16915// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
16916inline at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
16917 return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride);
16918}
16919
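// Example (illustrative): im2col unfolds sliding local blocks into columns and
// col2im folds columns back into an image, summing where blocks overlap. With
// non-overlapping patches (stride == kernel_size) the pair round-trips exactly:
//
//   at::Tensor x    = at::randn({1, 2, 6, 6});
//   at::Tensor cols = at::im2col(x, /*kernel_size=*/{3, 3}, /*dilation=*/{1, 1},
//                                /*padding=*/{0, 0}, /*stride=*/{3, 3});
//   at::Tensor y    = at::col2im(cols, /*output_size=*/{6, 6}, {3, 3},
//                                {1, 1}, {0, 0}, {3, 3});
//   // y equals x here because the 3x3 patches do not overlap.
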
16920// aten::isfinite(Tensor self) -> Tensor
16921inline at::Tensor isfinite(const at::Tensor & self) {
16922 return at::_ops::isfinite::call(self);
16923}
16924
16925// aten::isinf(Tensor self) -> Tensor
16926inline at::Tensor isinf(const at::Tensor & self) {
16927 return at::_ops::isinf::call(self);
16928}
16929
16930// aten::isposinf(Tensor self) -> Tensor
16931inline at::Tensor isposinf(const at::Tensor & self) {
16932 return at::_ops::isposinf::call(self);
16933}
16934
16935// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16936inline at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self) {
16937 return at::_ops::isposinf_out::call(self, out);
16938}
16939// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16940inline at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out) {
16941 return at::_ops::isposinf_out::call(self, out);
16942}
16943
16944// aten::isneginf(Tensor self) -> Tensor
16945inline at::Tensor isneginf(const at::Tensor & self) {
16946 return at::_ops::isneginf::call(self);
16947}
16948
16949// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16950inline at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
16951 return at::_ops::isneginf_out::call(self, out);
16952}
16953// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16954inline at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
16955 return at::_ops::isneginf_out::call(self, out);
16956}
16957
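// Example (illustrative): throughout this header each `_out` wrapper takes the
// destination tensor first, while the matching `_outf` wrapper keeps the schema
// order with `out` last; both forward to the same underlying op:
//
//   #include <cmath>
//   at::Tensor x   = at::tensor({1.0, INFINITY, -INFINITY});
//   at::Tensor out = at::empty({3}, at::kBool);
//   at::isposinf_out(out, x);   // out-first convention
//   at::isneginf_outf(x, out);  // schema-order convention, reuses the same buffer
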
16958// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
16959inline at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level) {
16960 return at::_ops::_add_batch_dim::call(self, batch_dim, level);
16961}
16962
16963// aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
16964inline at::Tensor _remove_batch_dim(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
16965 return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
16966}
16967
16968// aten::special_entr(Tensor self) -> Tensor
16969inline at::Tensor special_entr(const at::Tensor & self) {
16970 return at::_ops::special_entr::call(self);
16971}
16972
16973// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16974inline at::Tensor & special_entr_out(at::Tensor & out, const at::Tensor & self) {
16975 return at::_ops::special_entr_out::call(self, out);
16976}
16977// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16978inline at::Tensor & special_entr_outf(const at::Tensor & self, at::Tensor & out) {
16979 return at::_ops::special_entr_out::call(self, out);
16980}
16981
16982// aten::special_ndtri(Tensor self) -> Tensor
16983inline at::Tensor special_ndtri(const at::Tensor & self) {
16984 return at::_ops::special_ndtri::call(self);
16985}
16986
16987// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16988inline at::Tensor & special_ndtri_out(at::Tensor & out, const at::Tensor & self) {
16989 return at::_ops::special_ndtri_out::call(self, out);
16990}
16991// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
16992inline at::Tensor & special_ndtri_outf(const at::Tensor & self, at::Tensor & out) {
16993 return at::_ops::special_ndtri_out::call(self, out);
16994}
16995
16996// aten::special_log_ndtr(Tensor self) -> Tensor
16997inline at::Tensor special_log_ndtr(const at::Tensor & self) {
16998 return at::_ops::special_log_ndtr::call(self);
16999}
17000
17001// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17002inline at::Tensor & special_log_ndtr_out(at::Tensor & out, const at::Tensor & self) {
17003 return at::_ops::special_log_ndtr_out::call(self, out);
17004}
17005// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17006inline at::Tensor & special_log_ndtr_outf(const at::Tensor & self, at::Tensor & out) {
17007 return at::_ops::special_log_ndtr_out::call(self, out);
17008}
17009
17010// aten::special_expm1(Tensor self) -> Tensor
17011inline at::Tensor special_expm1(const at::Tensor & self) {
17012 return at::_ops::special_expm1::call(self);
17013}
17014
17015// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17016inline at::Tensor & special_expm1_out(at::Tensor & out, const at::Tensor & self) {
17017 return at::_ops::special_expm1_out::call(self, out);
17018}
17019// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17020inline at::Tensor & special_expm1_outf(const at::Tensor & self, at::Tensor & out) {
17021 return at::_ops::special_expm1_out::call(self, out);
17022}
17023
17024// aten::special_exp2(Tensor self) -> Tensor
17025inline at::Tensor special_exp2(const at::Tensor & self) {
17026 return at::_ops::special_exp2::call(self);
17027}
17028
17029// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17030inline at::Tensor & special_exp2_out(at::Tensor & out, const at::Tensor & self) {
17031 return at::_ops::special_exp2_out::call(self, out);
17032}
17033// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17034inline at::Tensor & special_exp2_outf(const at::Tensor & self, at::Tensor & out) {
17035 return at::_ops::special_exp2_out::call(self, out);
17036}
17037
17038// aten::special_psi(Tensor self) -> Tensor
17039inline at::Tensor special_psi(const at::Tensor & self) {
17040 return at::_ops::special_psi::call(self);
17041}
17042
17043// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17044inline at::Tensor & special_psi_out(at::Tensor & out, const at::Tensor & self) {
17045 return at::_ops::special_psi_out::call(self, out);
17046}
17047// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17048inline at::Tensor & special_psi_outf(const at::Tensor & self, at::Tensor & out) {
17049 return at::_ops::special_psi_out::call(self, out);
17050}
17051
17052// aten::special_digamma(Tensor self) -> Tensor
17053inline at::Tensor special_digamma(const at::Tensor & self) {
17054 return at::_ops::special_digamma::call(self);
17055}
17056
17057// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17058inline at::Tensor & special_digamma_out(at::Tensor & out, const at::Tensor & self) {
17059 return at::_ops::special_digamma_out::call(self, out);
17060}
17061// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17062inline at::Tensor & special_digamma_outf(const at::Tensor & self, at::Tensor & out) {
17063 return at::_ops::special_digamma_out::call(self, out);
17064}
17065
17066// aten::special_gammaln(Tensor self) -> Tensor
17067inline at::Tensor special_gammaln(const at::Tensor & self) {
17068 return at::_ops::special_gammaln::call(self);
17069}
17070
17071// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17072inline at::Tensor & special_gammaln_out(at::Tensor & out, const at::Tensor & self) {
17073 return at::_ops::special_gammaln_out::call(self, out);
17074}
17075// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17076inline at::Tensor & special_gammaln_outf(const at::Tensor & self, at::Tensor & out) {
17077 return at::_ops::special_gammaln_out::call(self, out);
17078}
17079
17080// aten::special_erf(Tensor self) -> Tensor
17081inline at::Tensor special_erf(const at::Tensor & self) {
17082 return at::_ops::special_erf::call(self);
17083}
17084
17085// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17086inline at::Tensor & special_erf_out(at::Tensor & out, const at::Tensor & self) {
17087 return at::_ops::special_erf_out::call(self, out);
17088}
17089// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17090inline at::Tensor & special_erf_outf(const at::Tensor & self, at::Tensor & out) {
17091 return at::_ops::special_erf_out::call(self, out);
17092}
17093
17094// aten::special_erfc(Tensor self) -> Tensor
17095inline at::Tensor special_erfc(const at::Tensor & self) {
17096 return at::_ops::special_erfc::call(self);
17097}
17098
17099// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17100inline at::Tensor & special_erfc_out(at::Tensor & out, const at::Tensor & self) {
17101 return at::_ops::special_erfc_out::call(self, out);
17102}
17103// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17104inline at::Tensor & special_erfc_outf(const at::Tensor & self, at::Tensor & out) {
17105 return at::_ops::special_erfc_out::call(self, out);
17106}
17107
17108// aten::special_erfcx(Tensor self) -> Tensor
17109inline at::Tensor special_erfcx(const at::Tensor & self) {
17110 return at::_ops::special_erfcx::call(self);
17111}
17112
17113// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17114inline at::Tensor & special_erfcx_out(at::Tensor & out, const at::Tensor & self) {
17115 return at::_ops::special_erfcx_out::call(self, out);
17116}
17117// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17118inline at::Tensor & special_erfcx_outf(const at::Tensor & self, at::Tensor & out) {
17119 return at::_ops::special_erfcx_out::call(self, out);
17120}
17121
17122// aten::special_erfinv(Tensor self) -> Tensor
17123inline at::Tensor special_erfinv(const at::Tensor & self) {
17124 return at::_ops::special_erfinv::call(self);
17125}
17126
17127// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17128inline at::Tensor & special_erfinv_out(at::Tensor & out, const at::Tensor & self) {
17129 return at::_ops::special_erfinv_out::call(self, out);
17130}
17131// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17132inline at::Tensor & special_erfinv_outf(const at::Tensor & self, at::Tensor & out) {
17133 return at::_ops::special_erfinv_out::call(self, out);
17134}
17135
17136// aten::special_ndtr(Tensor self) -> Tensor
17137inline at::Tensor special_ndtr(const at::Tensor & self) {
17138 return at::_ops::special_ndtr::call(self);
17139}
17140
17141// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17142inline at::Tensor & special_ndtr_out(at::Tensor & out, const at::Tensor & self) {
17143 return at::_ops::special_ndtr_out::call(self, out);
17144}
17145// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17146inline at::Tensor & special_ndtr_outf(const at::Tensor & self, at::Tensor & out) {
17147 return at::_ops::special_ndtr_out::call(self, out);
17148}
17149
17150// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor
17151inline at::Tensor special_xlog1py(const at::Tensor & self, const at::Tensor & other) {
17152 return at::_ops::special_xlog1py::call(self, other);
17153}
17154
17155// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor
17156inline at::Tensor special_xlog1py(const at::Scalar & self, const at::Tensor & other) {
17157 return at::_ops::special_xlog1py_self_scalar::call(self, other);
17158}
17159
17160// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor
17161inline at::Tensor special_xlog1py(const at::Tensor & self, const at::Scalar & other) {
17162 return at::_ops::special_xlog1py_other_scalar::call(self, other);
17163}
17164
17165// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17166inline at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
17167 return at::_ops::special_xlog1py_out::call(self, other, out);
17168}
17169// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17170inline at::Tensor & special_xlog1py_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
17171 return at::_ops::special_xlog1py_out::call(self, other, out);
17172}
17173
17174// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17175inline at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
17176 return at::_ops::special_xlog1py_self_scalar_out::call(self, other, out);
17177}
17178// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17179inline at::Tensor & special_xlog1py_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
17180 return at::_ops::special_xlog1py_self_scalar_out::call(self, other, out);
17181}
17182
17183// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
17184inline at::Tensor & special_xlog1py_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
17185 return at::_ops::special_xlog1py_other_scalar_out::call(self, other, out);
17186}
17187// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
17188inline at::Tensor & special_xlog1py_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
17189 return at::_ops::special_xlog1py_other_scalar_out::call(self, other, out);
17190}
17191
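// Example (illustrative): special_xlog1py computes self * log1p(other), taking
// 0 * log1p(y) as 0; like the other binary special functions here it has
// Scalar/Tensor overloads on either side:
//
//   at::Tensor t = at::rand({4});
//   at::Tensor a = at::special_xlog1py(t, t);                // Tensor, Tensor
//   at::Tensor b = at::special_xlog1py(at::Scalar(2.0), t);  // Scalar, Tensor
//   at::Tensor c = at::special_xlog1py(t, at::Scalar(0.5));  // Tensor, Scalar
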
17192// aten::special_xlogy(Tensor self, Tensor other) -> Tensor
17193inline at::Tensor special_xlogy(const at::Tensor & self, const at::Tensor & other) {
17194 return at::_ops::special_xlogy::call(self, other);
17195}
17196
17197// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
17198inline at::Tensor special_xlogy(const at::Scalar & self, const at::Tensor & other) {
17199 return at::_ops::special_xlogy_self_scalar::call(self, other);
17200}
17201
17202// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
17203inline at::Tensor special_xlogy(const at::Tensor & self, const at::Scalar & other) {
17204 return at::_ops::special_xlogy_other_scalar::call(self, other);
17205}
17206
17207// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17208inline at::Tensor & special_xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
17209 return at::_ops::special_xlogy_out::call(self, other, out);
17210}
17211// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17212inline at::Tensor & special_xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
17213 return at::_ops::special_xlogy_out::call(self, other, out);
17214}
17215
17216// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17217inline at::Tensor & special_xlogy_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
17218 return at::_ops::special_xlogy_self_scalar_out::call(self, other, out);
17219}
17220// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17221inline at::Tensor & special_xlogy_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
17222 return at::_ops::special_xlogy_self_scalar_out::call(self, other, out);
17223}
17224
17225// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
17226inline at::Tensor & special_xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
17227 return at::_ops::special_xlogy_other_scalar_out::call(self, other, out);
17228}
17229// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
17230inline at::Tensor & special_xlogy_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
17231 return at::_ops::special_xlogy_other_scalar_out::call(self, other, out);
17232}
17233
17234// aten::special_zeta(Tensor self, Tensor other) -> Tensor
17235inline at::Tensor special_zeta(const at::Tensor & self, const at::Tensor & other) {
17236 return at::_ops::special_zeta::call(self, other);
17237}
17238
17239// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
17240inline at::Tensor special_zeta(const at::Scalar & self, const at::Tensor & other) {
17241 return at::_ops::special_zeta_self_scalar::call(self, other);
17242}
17243
17244// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
17245inline at::Tensor special_zeta(const at::Tensor & self, const at::Scalar & other) {
17246 return at::_ops::special_zeta_other_scalar::call(self, other);
17247}
17248
17249// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17250inline at::Tensor & special_zeta_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
17251 return at::_ops::special_zeta_out::call(self, other, out);
17252}
17253// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17254inline at::Tensor & special_zeta_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
17255 return at::_ops::special_zeta_out::call(self, other, out);
17256}
17257
17258// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17259inline at::Tensor & special_zeta_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
17260 return at::_ops::special_zeta_self_scalar_out::call(self, other, out);
17261}
17262// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17263inline at::Tensor & special_zeta_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
17264 return at::_ops::special_zeta_self_scalar_out::call(self, other, out);
17265}
17266
17267// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
17268inline at::Tensor & special_zeta_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
17269 return at::_ops::special_zeta_other_scalar_out::call(self, other, out);
17270}
17271// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
17272inline at::Tensor & special_zeta_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
17273 return at::_ops::special_zeta_other_scalar_out::call(self, other, out);
17274}
17275
17276// aten::special_i0(Tensor self) -> Tensor
17277inline at::Tensor special_i0(const at::Tensor & self) {
17278 return at::_ops::special_i0::call(self);
17279}
17280
17281// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17282inline at::Tensor & special_i0_out(at::Tensor & out, const at::Tensor & self) {
17283 return at::_ops::special_i0_out::call(self, out);
17284}
17285// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17286inline at::Tensor & special_i0_outf(const at::Tensor & self, at::Tensor & out) {
17287 return at::_ops::special_i0_out::call(self, out);
17288}
17289
17290// aten::special_i0e(Tensor self) -> Tensor
17291inline at::Tensor special_i0e(const at::Tensor & self) {
17292 return at::_ops::special_i0e::call(self);
17293}
17294
17295// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17296inline at::Tensor & special_i0e_out(at::Tensor & out, const at::Tensor & self) {
17297 return at::_ops::special_i0e_out::call(self, out);
17298}
17299// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17300inline at::Tensor & special_i0e_outf(const at::Tensor & self, at::Tensor & out) {
17301 return at::_ops::special_i0e_out::call(self, out);
17302}
17303
17304// aten::special_i1(Tensor self) -> Tensor
17305inline at::Tensor special_i1(const at::Tensor & self) {
17306 return at::_ops::special_i1::call(self);
17307}
17308
17309// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17310inline at::Tensor & special_i1_out(at::Tensor & out, const at::Tensor & self) {
17311 return at::_ops::special_i1_out::call(self, out);
17312}
17313// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17314inline at::Tensor & special_i1_outf(const at::Tensor & self, at::Tensor & out) {
17315 return at::_ops::special_i1_out::call(self, out);
17316}
17317
17318// aten::special_i1e(Tensor self) -> Tensor
17319inline at::Tensor special_i1e(const at::Tensor & self) {
17320 return at::_ops::special_i1e::call(self);
17321}
17322
17323// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17324inline at::Tensor & special_i1e_out(at::Tensor & out, const at::Tensor & self) {
17325 return at::_ops::special_i1e_out::call(self, out);
17326}
17327// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17328inline at::Tensor & special_i1e_outf(const at::Tensor & self, at::Tensor & out) {
17329 return at::_ops::special_i1e_out::call(self, out);
17330}
17331
17332// aten::special_logit(Tensor self, float? eps=None) -> Tensor
17333inline at::Tensor special_logit(const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
17334 return at::_ops::special_logit::call(self, eps);
17335}
17336
17337// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
17338inline at::Tensor & special_logit_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
17339 return at::_ops::special_logit_out::call(self, eps, out);
17340}
17341// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
17342inline at::Tensor & special_logit_outf(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
17343 return at::_ops::special_logit_out::call(self, eps, out);
17344}
17345
17346// aten::special_polygamma(int n, Tensor self) -> Tensor
17347inline at::Tensor special_polygamma(int64_t n, const at::Tensor & self) {
17348 return at::_ops::special_polygamma::call(n, self);
17349}
17350
17351// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17352inline at::Tensor & special_polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) {
17353 return at::_ops::special_polygamma_out::call(n, self, out);
17354}
17355// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17356inline at::Tensor & special_polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) {
17357 return at::_ops::special_polygamma_out::call(n, self, out);
17358}
17359
17360// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
17361inline at::Tensor special_logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
17362 return at::_ops::special_logsumexp::call(self, dim, keepdim);
17363}
17364
17365// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
17366inline at::Tensor & special_logsumexp_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
17367 return at::_ops::special_logsumexp_out::call(self, dim, keepdim, out);
17368}
17369// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
17370inline at::Tensor & special_logsumexp_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
17371 return at::_ops::special_logsumexp_out::call(self, dim, keepdim, out);
17372}
17373
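// Example (illustrative): special_logsumexp reduces over the listed dims in a
// numerically stable fashion; keepdim=true retains the reduced dims as size 1:
//
//   at::Tensor x  = at::randn({2, 3});
//   at::Tensor r1 = at::special_logsumexp(x, /*dim=*/{1});            // shape {2}
//   at::Tensor r2 = at::special_logsumexp(x, {1}, /*keepdim=*/true);  // shape {2, 1}
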
17374// aten::special_expit(Tensor self) -> Tensor
17375inline at::Tensor special_expit(const at::Tensor & self) {
17376 return at::_ops::special_expit::call(self);
17377}
17378
17379// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17380inline at::Tensor & special_expit_out(at::Tensor & out, const at::Tensor & self) {
17381 return at::_ops::special_expit_out::call(self, out);
17382}
17383// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17384inline at::Tensor & special_expit_outf(const at::Tensor & self, at::Tensor & out) {
17385 return at::_ops::special_expit_out::call(self, out);
17386}
17387
17388// aten::special_sinc(Tensor self) -> Tensor
17389inline at::Tensor special_sinc(const at::Tensor & self) {
17390 return at::_ops::special_sinc::call(self);
17391}
17392
17393// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17394inline at::Tensor & special_sinc_out(at::Tensor & out, const at::Tensor & self) {
17395 return at::_ops::special_sinc_out::call(self, out);
17396}
17397// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17398inline at::Tensor & special_sinc_outf(const at::Tensor & self, at::Tensor & out) {
17399 return at::_ops::special_sinc_out::call(self, out);
17400}
17401
17402// aten::special_round(Tensor self, *, int decimals=0) -> Tensor
17403inline at::Tensor special_round(const at::Tensor & self, int64_t decimals=0) {
17404 return at::_ops::special_round::call(self, decimals);
17405}
17406
17407// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
17408inline at::Tensor & special_round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals=0) {
17409 return at::_ops::special_round_out::call(self, decimals, out);
17410}
17411// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)
17412inline at::Tensor & special_round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out) {
17413 return at::_ops::special_round_out::call(self, decimals, out);
17414}
17415
17416// aten::special_log1p(Tensor self) -> Tensor
17417inline at::Tensor special_log1p(const at::Tensor & self) {
17418 return at::_ops::special_log1p::call(self);
17419}
17420
17421// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17422inline at::Tensor & special_log1p_out(at::Tensor & out, const at::Tensor & self) {
17423 return at::_ops::special_log1p_out::call(self, out);
17424}
17425// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
17426inline at::Tensor & special_log1p_outf(const at::Tensor & self, at::Tensor & out) {
17427 return at::_ops::special_log1p_out::call(self, out);
17428}
17429
17430// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
17431inline at::Tensor special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
17432 return at::_ops::special_log_softmax::call(self, dim, dtype);
17433}
17434
17435// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17436inline at::Tensor & special_gammainc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
17437 return at::_ops::special_gammainc_out::call(self, other, out);
17438}
17439// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17440inline at::Tensor & special_gammainc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
17441 return at::_ops::special_gammainc_out::call(self, other, out);
17442}
17443
17444// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
17445inline at::Tensor special_gammainc(const at::Tensor & self, const at::Tensor & other) {
17446 return at::_ops::special_gammainc::call(self, other);
17447}
17448
17449// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17450inline at::Tensor & special_gammaincc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
17451 return at::_ops::special_gammaincc_out::call(self, other, out);
17452}
17453// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17454inline at::Tensor & special_gammaincc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
17455 return at::_ops::special_gammaincc_out::call(self, other, out);
17456}
17457
17458// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor
17459inline at::Tensor special_gammaincc(const at::Tensor & self, const at::Tensor & other) {
17460 return at::_ops::special_gammaincc::call(self, other);
17461}
17462
17463// aten::special_multigammaln(Tensor self, int p) -> Tensor
17464inline at::Tensor special_multigammaln(const at::Tensor & self, int64_t p) {
17465 return at::_ops::special_multigammaln::call(self, p);
17466}
17467
17468// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
17469inline at::Tensor & special_multigammaln_out(at::Tensor & out, const at::Tensor & self, int64_t p) {
17470 return at::_ops::special_multigammaln_out::call(self, p, out);
17471}
17472// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
17473inline at::Tensor & special_multigammaln_outf(const at::Tensor & self, int64_t p, at::Tensor & out) {
17474 return at::_ops::special_multigammaln_out::call(self, p, out);
17475}
17476
17477// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
17478inline at::Tensor special_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
17479 return at::_ops::special_softmax::call(self, dim, dtype);
17480}
17481
17482// aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
17483inline at::Tensor fft_fft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17484 return at::_ops::fft_fft::call(self, n, dim, norm);
17485}
17486
17487// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17488inline at::Tensor & fft_fft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17489 return at::_ops::fft_fft_out::call(self, n, dim, norm, out);
17490}
17491// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17492inline at::Tensor & fft_fft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17493 return at::_ops::fft_fft_out::call(self, n, dim, norm, out);
17494}
17495
17496// aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
17497inline at::Tensor fft_ifft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17498 return at::_ops::fft_ifft::call(self, n, dim, norm);
17499}
17500
17501// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17502inline at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17503 return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
17504}
17505// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17506inline at::Tensor & fft_ifft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17507 return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
17508}
17509
17510// aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
17511inline at::Tensor fft_rfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17512 return at::_ops::fft_rfft::call(self, n, dim, norm);
17513}
17514
17515// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17516inline at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17517 return at::_ops::fft_rfft_out::call(self, n, dim, norm, out);
17518}
17519// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17520inline at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17521 return at::_ops::fft_rfft_out::call(self, n, dim, norm, out);
17522}
17523
17524// aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
17525inline at::Tensor fft_irfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17526 return at::_ops::fft_irfft::call(self, n, dim, norm);
17527}
17528
17529// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17530inline at::Tensor & fft_irfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17531 return at::_ops::fft_irfft_out::call(self, n, dim, norm, out);
17532}
17533// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17534inline at::Tensor & fft_irfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17535 return at::_ops::fft_irfft_out::call(self, n, dim, norm, out);
17536}
17537
17538// aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
17539inline at::Tensor fft_hfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17540 return at::_ops::fft_hfft::call(self, n, dim, norm);
17541}
17542
17543// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17544inline at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17545 return at::_ops::fft_hfft_out::call(self, n, dim, norm, out);
17546}
17547// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17548inline at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17549 return at::_ops::fft_hfft_out::call(self, n, dim, norm, out);
17550}
17551
17552// aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
17553inline at::Tensor fft_ihfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17554 return at::_ops::fft_ihfft::call(self, n, dim, norm);
17555}
17556
17557// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17558inline at::Tensor & fft_ihfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
17559 return at::_ops::fft_ihfft_out::call(self, n, dim, norm, out);
17560}
17561// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17562inline at::Tensor & fft_ihfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17563 return at::_ops::fft_ihfft_out::call(self, n, dim, norm, out);
17564}
17565
17566// aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
17567inline at::Tensor fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17568 return at::_ops::fft_fft2::call(self, s, dim, norm);
17569}
17570
17571// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17572inline at::Tensor & fft_fft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17573 return at::_ops::fft_fft2_out::call(self, s, dim, norm, out);
17574}
17575// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17576inline at::Tensor & fft_fft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17577 return at::_ops::fft_fft2_out::call(self, s, dim, norm, out);
17578}
17579
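// Example (illustrative): the 2-D transforms default to the last two dims; the
// optional `s` pads or crops each transformed dim, and `norm` may be "backward",
// "forward" or "ortho":
//
//   at::Tensor img  = at::randn({1, 8, 8});
//   at::Tensor spec = at::fft_fft2(img);            // transforms dims {-2, -1}
//   at::Tensor big  = at::fft_fft2(img, {16, 16});  // zero-padded to 16x16 first
//   at::Tensor back = at::fft_ifft2(spec);          // inverse round trip
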
17580// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
17581inline at::Tensor fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17582 return at::_ops::fft_ifft2::call(self, s, dim, norm);
17583}
17584
17585// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17586inline at::Tensor & fft_ifft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17587 return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out);
17588}
17589// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17590inline at::Tensor & fft_ifft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17591 return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out);
17592}
17593
17594// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
17595inline at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17596 return at::_ops::fft_rfft2::call(self, s, dim, norm);
17597}
17598
17599// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17600inline at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17601 return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out);
17602}
17603// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17604inline at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17605 return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out);
17606}
17607
17608// aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
17609inline at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17610 return at::_ops::fft_irfft2::call(self, s, dim, norm);
17611}
17612
17613// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17614inline at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17615 return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out);
17616}
17617// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17618inline at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17619 return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out);
17620}
17621
17622// aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
17623inline at::Tensor fft_hfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17624 return at::_ops::fft_hfft2::call(self, s, dim, norm);
17625}
17626
17627// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17628inline const at::Tensor & fft_hfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17629 return at::_ops::fft_hfft2_out::call(self, s, dim, norm, out);
17630}
17631// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17632inline const at::Tensor & fft_hfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
17633 return at::_ops::fft_hfft2_out::call(self, s, dim, norm, out);
17634}
17635
17636// aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
17637inline at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17638 return at::_ops::fft_ihfft2::call(self, s, dim, norm);
17639}
17640
17641// aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17642inline const at::Tensor & fft_ihfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
17643 return at::_ops::fft_ihfft2_out::call(self, s, dim, norm, out);
17644}
17645// aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17646inline const at::Tensor & fft_ihfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
17647 return at::_ops::fft_ihfft2_out::call(self, s, dim, norm, out);
17648}
17649
17650// aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
17651inline at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17652 return at::_ops::fft_fftn::call(self, s, dim, norm);
17653}
17654
17655// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17656inline at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17657 return at::_ops::fft_fftn_out::call(self, s, dim, norm, out);
17658}
17659// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17660inline at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17661 return at::_ops::fft_fftn_out::call(self, s, dim, norm, out);
17662}
17663
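// Example (illustrative): the n-dimensional variants transform every dim when
// `dim` is left unset; pass `dim` to restrict the transform and `s` to choose
// each output length (both are optional):
//
//   at::Tensor x     = at::randn({4, 5, 6});
//   at::Tensor all3  = at::fft_fftn(x);                          // all dims
//   at::Tensor last2 = at::fft_fftn(x, c10::nullopt, {-2, -1});  // dims -2 and -1
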
17664// aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
17665inline at::Tensor fft_ifftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17666 return at::_ops::fft_ifftn::call(self, s, dim, norm);
17667}
17668
17669// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17670inline at::Tensor & fft_ifftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17671 return at::_ops::fft_ifftn_out::call(self, s, dim, norm, out);
17672}
17673// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17674inline at::Tensor & fft_ifftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17675 return at::_ops::fft_ifftn_out::call(self, s, dim, norm, out);
17676}
17677
17678// aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
17679inline at::Tensor fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17680 return at::_ops::fft_rfftn::call(self, s, dim, norm);
17681}
17682
17683// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17684inline at::Tensor & fft_rfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17685 return at::_ops::fft_rfftn_out::call(self, s, dim, norm, out);
17686}
17687// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17688inline at::Tensor & fft_rfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17689 return at::_ops::fft_rfftn_out::call(self, s, dim, norm, out);
17690}
17691
17692// aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
17693inline at::Tensor fft_irfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17694 return at::_ops::fft_irfftn::call(self, s, dim, norm);
17695}
17696
17697// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17698inline at::Tensor & fft_irfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17699 return at::_ops::fft_irfftn_out::call(self, s, dim, norm, out);
17700}
17701// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17702inline at::Tensor & fft_irfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
17703 return at::_ops::fft_irfftn_out::call(self, s, dim, norm, out);
17704}
17705
17706// aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
17707inline at::Tensor fft_hfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17708 return at::_ops::fft_hfftn::call(self, s, dim, norm);
17709}
17710
17711// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17712inline const at::Tensor & fft_hfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17713 return at::_ops::fft_hfftn_out::call(self, s, dim, norm, out);
17714}
17715// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17716inline const at::Tensor & fft_hfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
17717 return at::_ops::fft_hfftn_out::call(self, s, dim, norm, out);
17718}
17719
17720// aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
17721inline at::Tensor fft_ihfftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17722 return at::_ops::fft_ihfftn::call(self, s, dim, norm);
17723}
17724
17725// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17726inline const at::Tensor & fft_ihfftn_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
17727 return at::_ops::fft_ihfftn_out::call(self, s, dim, norm, out);
17728}
17729// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
17730inline const at::Tensor & fft_ihfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
17731 return at::_ops::fft_ihfftn_out::call(self, s, dim, norm, out);
17732}
17733
17734// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
17735inline at::Tensor fft_fftfreq(int64_t n, double d=1.0, at::TensorOptions options={}) {
17736 return at::_ops::fft_fftfreq::call(n, d, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
17737}
17738// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
17739inline at::Tensor fft_fftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
17740 return at::_ops::fft_fftfreq::call(n, d, dtype, layout, device, pin_memory);
17741}
17742
17743// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
17744inline at::Tensor & fft_fftfreq_out(at::Tensor & out, int64_t n, double d=1.0) {
17745 return at::_ops::fft_fftfreq_out::call(n, d, out);
17746}
17747// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
17748inline at::Tensor & fft_fftfreq_outf(int64_t n, double d, at::Tensor & out) {
17749 return at::_ops::fft_fftfreq_out::call(n, d, out);
17750}
17751
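// Example (illustrative): like other factories in this header, fft_fftfreq has
// one overload taking bundled TensorOptions and one taking the unpacked
// dtype/layout/device/pin_memory optionals. Bin k maps to frequency k / (n * d):
//
//   at::Tensor f = at::fft_fftfreq(8, /*d=*/0.5,
//                                  at::TensorOptions().dtype(at::kDouble));
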
17752// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
17753inline at::Tensor fft_rfftfreq(int64_t n, double d=1.0, at::TensorOptions options={}) {
17754 return at::_ops::fft_rfftfreq::call(n, d, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
17755}
17756// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
17757inline at::Tensor fft_rfftfreq(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
17758 return at::_ops::fft_rfftfreq::call(n, d, dtype, layout, device, pin_memory);
17759}
17760
17761// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
17762inline at::Tensor & fft_rfftfreq_out(at::Tensor & out, int64_t n, double d=1.0) {
17763 return at::_ops::fft_rfftfreq_out::call(n, d, out);
17764}
17765// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
17766inline at::Tensor & fft_rfftfreq_outf(int64_t n, double d, at::Tensor & out) {
17767 return at::_ops::fft_rfftfreq_out::call(n, d, out);
17768}
17769
17770// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
17771inline at::Tensor fft_fftshift(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt) {
17772 return at::_ops::fft_fftshift::call(self, dim);
17773}
17774
17775// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
17776inline at::Tensor fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt) {
17777 return at::_ops::fft_ifftshift::call(self, dim);
17778}
17779
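// Example (illustrative): fft_fftshift moves the zero-frequency bin to the
// centre of the spectrum and fft_ifftshift undoes it:
//
//   at::Tensor freqs   = at::fft_fftfreq(5);          // {0, .2, .4, -.4, -.2}
//   at::Tensor centred = at::fft_fftshift(freqs);     // {-.4, -.2, 0, .2, .4}
//   at::Tensor orig    = at::fft_ifftshift(centred);  // original order again
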
17780// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
17781inline ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false) {
17782 return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
17783}
17784
17785// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
17786inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false) {
17787 return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info);
17788}
17789// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
17790inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
17791 return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info);
17792}
17793
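// Example (illustrative): the `_ex` factorizations report failures through an
// `info` tensor instead of throwing (unless check_errors=true), so callers can
// inspect success themselves. A sketch using C++17 structured bindings:
//
//   at::Tensor A   = at::randn({3, 3});
//   at::Tensor spd = at::mm(A, A.t()) + 3 * at::eye(3);  // make it positive definite
//   auto [L, info] = at::linalg_cholesky_ex(spd);
//   TORCH_CHECK(info.item<int>() == 0, "Cholesky factorization failed");
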
17794// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
17795inline at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false) {
17796 return at::_ops::linalg_cholesky::call(self, upper);
17797}
17798
17799// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
17800inline at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false) {
17801 return at::_ops::linalg_cholesky_out::call(self, upper, out);
17802}
17803// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
17804inline at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
17805 return at::_ops::linalg_cholesky_out::call(self, upper, out);
17806}
17807
17808// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
17809inline at::Tensor linalg_cross(const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) {
17810 return at::_ops::linalg_cross::call(self, other, dim);
17811}
17812
17813// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
17814inline at::Tensor & linalg_cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) {
17815 return at::_ops::linalg_cross_out::call(self, other, dim, out);
17816}
17817// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
17818inline at::Tensor & linalg_cross_outf(const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {
17819 return at::_ops::linalg_cross_out::call(self, other, dim, out);
17820}
17821
17822// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
17823inline ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(const at::Tensor & A, bool pivot=true) {
17824 return at::_ops::linalg_lu_factor::call(A, pivot);
17825}
17826
17827// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
17828inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) {
17829 return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
17830}
17831// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
17832inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
17833 return at::_ops::linalg_lu_factor_out::call(A, pivot, LU, pivots);
17834}
17835
17836// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
17837inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false) {
17838 return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
17839}
17840
17841// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
17842inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false) {
17843 return at::_ops::linalg_lu_factor_ex_out::call(A, pivot, check_errors, LU, pivots, info);
17844}
17845// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
17846inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
17847 return at::_ops::linalg_lu_factor_ex_out::call(A, pivot, check_errors, LU, pivots, info);
17848}
17849
17850// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
17851inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true) {
17852 return at::_ops::linalg_lu::call(A, pivot);
17853}
17854
17855// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
17856inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true) {
17857 return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
17858}
17859// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
17860inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
17861 return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
17862}
17863
17864// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
17865inline at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) {
17866 return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
17867}
17868
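// NOTE (editorial): a hedged usage sketch for the LU routines above,
// assuming a square, invertible A (C++17 structured bindings):
//
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor B = at::randn({3, 2});
//   auto [LU, pivots] = at::linalg_lu_factor(A);
//   at::Tensor X = at::linalg_lu_solve(LU, pivots, B);  // solves A X = B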
17869// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
17870inline at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) {
17871 return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out);
17872}
17873// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
17874inline at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
17875 return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out);
17876}
17877
17878// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
17879inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det(const at::Tensor & A) {
17880 return at::_ops::_linalg_det::call(A);
17881}
17882
17883// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
17884inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_out(at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
17885 return at::_ops::_linalg_det_result::call(A, result, LU, pivots);
17886}
17887// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
17888inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_outf(const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
17889 return at::_ops::_linalg_det_result::call(A, result, LU, pivots);
17890}
17891
17892// aten::linalg_det(Tensor A) -> Tensor
17893inline at::Tensor linalg_det(const at::Tensor & A) {
17894 return at::_ops::linalg_det::call(A);
17895}
17896
17897// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
17898inline at::Tensor & linalg_det_out(at::Tensor & out, const at::Tensor & A) {
17899 return at::_ops::linalg_det_out::call(A, out);
17900}
17901// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
17902inline at::Tensor & linalg_det_outf(const at::Tensor & A, at::Tensor & out) {
17903 return at::_ops::linalg_det_out::call(A, out);
17904}
17905
17906// aten::det(Tensor self) -> Tensor
17907inline at::Tensor det(const at::Tensor & self) {
17908 return at::_ops::det::call(self);
17909}
17910
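// NOTE (editorial): `det` is the older alias; `linalg_det` is the primary
// entry point with the same semantics. Hedged sketch:
//
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor d = at::linalg_det(A);  // 0-dim (scalar) tensor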
17911// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
17912inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(const at::Tensor & self, bool hermitian=false, bool check_errors=false) {
17913 return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
17914}
17915
17916// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
17917inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false) {
17918 return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info);
17919}
17920// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
17921inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
17922 return at::_ops::linalg_ldl_factor_ex_out::call(self, hermitian, check_errors, LD, pivots, info);
17923}
17924
17925// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
17926inline ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian=false) {
17927 return at::_ops::linalg_ldl_factor::call(self, hermitian);
17928}
17929
17930// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
17931inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false) {
17932 return at::_ops::linalg_ldl_factor_out::call(self, hermitian, LD, pivots);
17933}
17934// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
17935inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
17936 return at::_ops::linalg_ldl_factor_out::call(self, hermitian, LD, pivots);
17937}
17938
17939// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
17940inline at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
17941 return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
17942}
17943
17944// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
17945inline at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
17946 return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out);
17947}
17948// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
17949inline at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
17950 return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out);
17951}
17952
17953// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
17954inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
17955 return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
17956}
17957
17958// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
17959inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
17960 return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
17961}
17962// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
17963inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
17964 return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
17965}
17966
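// NOTE (editorial): a hedged sketch for `linalg_lstsq`; whether residuals,
// rank, and singular_values are populated depends on the input shape and
// the selected driver:
//
//   at::Tensor A = at::randn({5, 3});  // overdetermined system
//   at::Tensor b = at::randn({5, 2});
//   auto [solution, residuals, rank, sv] = at::linalg_lstsq(A, b);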
17967// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
17968inline at::Tensor linalg_matmul(const at::Tensor & self, const at::Tensor & other) {
17969 return at::_ops::linalg_matmul::call(self, other);
17970}
17971
17972// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17973inline at::Tensor & linalg_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
17974 return at::_ops::linalg_matmul_out::call(self, other, out);
17975}
17976// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
17977inline at::Tensor & linalg_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
17978 return at::_ops::linalg_matmul_out::call(self, other, out);
17979}
17980
17981// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
17982inline at::Tensor linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) {
17983 return at::_ops::linalg_vecdot::call(x, y, dim);
17984}
17985
17986// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
17987inline at::Tensor & linalg_vecdot_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) {
17988 return at::_ops::linalg_vecdot_out::call(x, y, dim, out);
17989}
17990// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
17991inline at::Tensor & linalg_vecdot_outf(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
17992 return at::_ops::linalg_vecdot_out::call(x, y, dim, out);
17993}
17994
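// NOTE (editorial): `linalg_vecdot` reduces over `dim` (default -1),
// conjugating `x` for complex inputs. Hedged sketch:
//
//   at::Tensor x = at::randn({4, 8});
//   at::Tensor y = at::randn({4, 8});
//   at::Tensor d = at::linalg_vecdot(x, y);  // shape {4}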
17995// aten::linalg_matrix_exp(Tensor self) -> Tensor
17996inline at::Tensor linalg_matrix_exp(const at::Tensor & self) {
17997 return at::_ops::linalg_matrix_exp::call(self);
17998}
17999
18000// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
18001inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(const at::Tensor & A) {
18002 return at::_ops::_linalg_slogdet::call(A);
18003}
18004
18005// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
18006inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
18007 return at::_ops::_linalg_slogdet_sign::call(A, sign, logabsdet, LU, pivots);
18008}
18009// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
18010inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
18011 return at::_ops::_linalg_slogdet_sign::call(A, sign, logabsdet, LU, pivots);
18012}
18013
18014// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
18015inline ::std::tuple<at::Tensor,at::Tensor> linalg_slogdet(const at::Tensor & A) {
18016 return at::_ops::linalg_slogdet::call(A);
18017}
18018
18019// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
18020inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & A) {
18021 return at::_ops::linalg_slogdet_out::call(A, sign, logabsdet);
18022}
18023// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
18024inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
18025 return at::_ops::linalg_slogdet_out::call(A, sign, logabsdet);
18026}
18027
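// NOTE (editorial): `linalg_slogdet` is the numerically stable route to a
// log-determinant, since det(A) = sign * exp(logabsdet) may overflow if
// formed directly. Hedged sketch:
//
//   at::Tensor A = at::randn({3, 3});
//   auto [sign, logabsdet] = at::linalg_slogdet(A);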
18028// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
18029inline ::std::tuple<at::Tensor,at::Tensor> slogdet(const at::Tensor & self) {
18030 return at::_ops::slogdet::call(self);
18031}
18032
18033// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
18034inline ::std::tuple<at::Tensor &,at::Tensor &> slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & self) {
18035 return at::_ops::slogdet_out::call(self, sign, logabsdet);
18036}
18037// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
18038inline ::std::tuple<at::Tensor &,at::Tensor &> slogdet_outf(const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
18039 return at::_ops::slogdet_out::call(self, sign, logabsdet);
18040}
18041
18042// aten::logdet(Tensor self) -> Tensor
18043inline at::Tensor logdet(const at::Tensor & self) {
18044 return at::_ops::logdet::call(self);
18045}
18046
18047// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
18048inline ::std::tuple<at::Tensor,at::Tensor> linalg_eig(const at::Tensor & self) {
18049 return at::_ops::linalg_eig::call(self);
18050}
18051
18052// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
18053inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self) {
18054 return at::_ops::linalg_eig_out::call(self, eigenvalues, eigenvectors);
18055}
18056// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
18057inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
18058 return at::_ops::linalg_eig_out::call(self, eigenvalues, eigenvectors);
18059}
18060
18061// aten::linalg_eigvals(Tensor self) -> Tensor
18062inline at::Tensor linalg_eigvals(const at::Tensor & self) {
18063 return at::_ops::linalg_eigvals::call(self);
18064}
18065
18066// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
18067inline at::Tensor & linalg_eigvals_out(at::Tensor & out, const at::Tensor & self) {
18068 return at::_ops::linalg_eigvals_out::call(self, out);
18069}
18070// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
18071inline at::Tensor & linalg_eigvals_outf(const at::Tensor & self, at::Tensor & out) {
18072 return at::_ops::linalg_eigvals_out::call(self, out);
18073}
18074
18075// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
18076inline ::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
18077 return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
18078}
18079
18080// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
18081inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
18082 return at::_ops::_linalg_eigh_eigenvalues::call(A, UPLO, compute_v, eigenvalues, eigenvectors);
18083}
18084// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
18085inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_outf(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
18086 return at::_ops::_linalg_eigh_eigenvalues::call(A, UPLO, compute_v, eigenvalues, eigenvectors);
18087}
18088
18089// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
18090inline ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(const at::Tensor & self, c10::string_view UPLO="L") {
18091 return at::_ops::linalg_eigh::call(self, UPLO);
18092}
18093
18094// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
18095inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO="L") {
18096 return at::_ops::linalg_eigh_eigvals::call(self, UPLO, eigvals, eigvecs);
18097}
18098// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
18099inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
18100 return at::_ops::linalg_eigh_eigvals::call(self, UPLO, eigvals, eigvecs);
18101}
18102
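// NOTE (editorial): `linalg_eigh` assumes a symmetric/Hermitian input and
// reads only the triangle selected by UPLO. Hedged sketch that symmetrizes
// first so the assumption holds:
//
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor S = A + A.transpose(-2, -1);
//   auto [w, V] = at::linalg_eigh(S, "L");  // eigenvalues in ascending order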
18103// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
18104inline at::Tensor linalg_eigvalsh(const at::Tensor & self, c10::string_view UPLO="L") {
18105 return at::_ops::linalg_eigvalsh::call(self, UPLO);
18106}
18107
18108// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
18109inline at::Tensor & linalg_eigvalsh_out(at::Tensor & out, const at::Tensor & self, c10::string_view UPLO="L") {
18110 return at::_ops::linalg_eigvalsh_out::call(self, UPLO, out);
18111}
18112// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
18113inline at::Tensor & linalg_eigvalsh_outf(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
18114 return at::_ops::linalg_eigvalsh_out::call(self, UPLO, out);
18115}
18116
18117// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
18118inline at::Tensor linalg_householder_product(const at::Tensor & input, const at::Tensor & tau) {
18119 return at::_ops::linalg_householder_product::call(input, tau);
18120}
18121
18122// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
18123inline at::Tensor & linalg_householder_product_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tau) {
18124 return at::_ops::linalg_householder_product_out::call(input, tau, out);
18125}
18126// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
18127inline at::Tensor & linalg_householder_product_outf(const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
18128 return at::_ops::linalg_householder_product_out::call(input, tau, out);
18129}
18130
18131// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
18132inline ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false) {
18133 return at::_ops::linalg_inv_ex::call(A, check_errors);
18134}
18135
18136// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
18137inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false) {
18138 return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info);
18139}
18140// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
18141inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
18142 return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info);
18143}
18144
18145// aten::linalg_inv(Tensor A) -> Tensor
18146inline at::Tensor linalg_inv(const at::Tensor & A) {
18147 return at::_ops::linalg_inv::call(A);
18148}
18149
18150// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
18151inline at::Tensor & linalg_inv_out(at::Tensor & out, const at::Tensor & A) {
18152 return at::_ops::linalg_inv_out::call(A, out);
18153}
18154// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
18155inline at::Tensor & linalg_inv_outf(const at::Tensor & A, at::Tensor & out) {
18156 return at::_ops::linalg_inv_out::call(A, out);
18157}
18158
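// NOTE (editorial): when only a solve is needed, prefer
// `at::linalg_solve(A, B)` over forming `linalg_inv(A).matmul(B)`; the
// explicit inverse is slower and less accurate. Hedged sketch:
//
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor Ainv = at::linalg_inv(A);  // throws if A is singular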
18159// aten::inverse(Tensor self) -> Tensor
18160inline at::Tensor inverse(const at::Tensor & self) {
18161 return at::_ops::inverse::call(self);
18162}
18163
18164// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
18165inline at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self) {
18166 return at::_ops::inverse_out::call(self, out);
18167}
18168// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
18169inline at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out) {
18170 return at::_ops::inverse_out::call(self, out);
18171}
18172
18173// aten::inner(Tensor self, Tensor other) -> Tensor
18174inline at::Tensor inner(const at::Tensor & self, const at::Tensor & other) {
18175 return at::_ops::inner::call(self, other);
18176}
18177
18178// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
18179inline at::Tensor & inner_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
18180 return at::_ops::inner_out::call(self, other, out);
18181}
18182// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
18183inline at::Tensor & inner_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
18184 return at::_ops::inner_out::call(self, other, out);
18185}
18186
18187// aten::outer(Tensor self, Tensor vec2) -> Tensor
18188inline at::Tensor outer(const at::Tensor & self, const at::Tensor & vec2) {
18189 return at::_ops::outer::call(self, vec2);
18190}
18191
18192// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
18193inline at::Tensor & outer_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
18194 return at::_ops::outer_out::call(self, vec2, out);
18195}
18196// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
18197inline at::Tensor & outer_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
18198 return at::_ops::outer_out::call(self, vec2, out);
18199}
18200
18201// aten::ger(Tensor self, Tensor vec2) -> Tensor
18202inline at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2) {
18203 return at::_ops::ger::call(self, vec2);
18204}
18205
18206// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
18207inline at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
18208 return at::_ops::ger_out::call(self, vec2, out);
18209}
18210// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
18211inline at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
18212 return at::_ops::ger_out::call(self, vec2, out);
18213}
18214
18215// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
18216inline at::Tensor linalg_norm(const at::Tensor & self, const c10::optional<at::Scalar> & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18217 return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
18218}
18219
18220// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
18221inline at::Tensor linalg_norm(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18222 return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
18223}
18224
18225// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18226inline at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18227 return at::_ops::linalg_norm_out::call(self, ord, dim, keepdim, dtype, out);
18228}
18229// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18230inline at::Tensor & linalg_norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
18231 return at::_ops::linalg_norm_out::call(self, ord, dim, keepdim, dtype, out);
18232}
18233
18234// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18235inline at::Tensor & linalg_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18236 return at::_ops::linalg_norm_ord_str_out::call(self, ord, dim, keepdim, dtype, out);
18237}
18238// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18239inline at::Tensor & linalg_norm_outf(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
18240 return at::_ops::linalg_norm_ord_str_out::call(self, ord, dim, keepdim, dtype, out);
18241}
18242
18243// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
18244inline at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18245 return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype);
18246}
18247
18248// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18249inline at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18250 return at::_ops::linalg_vector_norm_out::call(self, ord, dim, keepdim, dtype, out);
18251}
18252// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18253inline at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
18254 return at::_ops::linalg_vector_norm_out::call(self, ord, dim, keepdim, dtype, out);
18255}
18256
18257// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
18258inline at::Tensor linalg_matrix_norm(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18259 return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
18260}
18261
18262// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18263inline at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18264 return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out);
18265}
18266// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18267inline at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
18268 return at::_ops::linalg_matrix_norm_out::call(self, ord, dim, keepdim, dtype, out);
18269}
18270
18271// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
18272inline at::Tensor linalg_matrix_norm(const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18273 return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
18274}
18275
18276// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18277inline at::Tensor & linalg_matrix_norm_out(at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
18278 return at::_ops::linalg_matrix_norm_str_ord_out::call(self, ord, dim, keepdim, dtype, out);
18279}
18280// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
18281inline at::Tensor & linalg_matrix_norm_outf(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
18282 return at::_ops::linalg_matrix_norm_str_ord_out::call(self, ord, dim, keepdim, dtype, out);
18283}
18284
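// NOTE (editorial): the three norm entry points above divide by intent:
// `linalg_norm` picks vector vs. matrix behavior from the input and `dim`,
// `linalg_vector_norm` always reduces as a vector norm, and
// `linalg_matrix_norm` always reduces over a pair of matrix dimensions.
// Hedged sketch:
//
//   at::Tensor v = at::randn({8});
//   at::Tensor M = at::randn({4, 4});
//   at::Tensor n1 = at::linalg_vector_norm(v, 2);      // Euclidean norm
//   at::Tensor n2 = at::linalg_matrix_norm(M, "fro");  // Frobenius norm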
18285// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
18286inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt) {
18287 return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
18288}
18289
18290// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
18291inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt) {
18292 return at::_ops::_linalg_svd_U::call(A, full_matrices, compute_uv, driver, U, S, Vh);
18293}
18294// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
18295inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
18296 return at::_ops::_linalg_svd_U::call(A, full_matrices, compute_uv, driver, U, S, Vh);
18297}
18298
18299// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
18300inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd(const at::Tensor & A, bool full_matrices=true, c10::optional<c10::string_view> driver=c10::nullopt) {
18301 return at::_ops::linalg_svd::call(A, full_matrices, driver);
18302}
18303
18304// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
18305inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=true, c10::optional<c10::string_view> driver=c10::nullopt) {
18306 return at::_ops::linalg_svd_U::call(A, full_matrices, driver, U, S, Vh);
18307}
18308// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
18309inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_outf(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
18310 return at::_ops::linalg_svd_U::call(A, full_matrices, driver, U, S, Vh);
18311}
18312
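// NOTE (editorial): a hedged sketch for `linalg_svd`; pass
// `full_matrices=false` for the economy-size decomposition:
//
//   at::Tensor A = at::randn({5, 3});
//   auto [U, S, Vh] = at::linalg_svd(A, /*full_matrices=*/false);
//   // U: {5, 3}, S: {3}, Vh: {3, 3}; A ~= U.matmul(at::diag(S)).matmul(Vh)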
18313// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
18314inline at::Tensor linalg_svdvals(const at::Tensor & A, c10::optional<c10::string_view> driver=c10::nullopt) {
18315 return at::_ops::linalg_svdvals::call(A, driver);
18316}
18317
18318// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
18319inline at::Tensor & linalg_svdvals_out(at::Tensor & out, const at::Tensor & A, c10::optional<c10::string_view> driver=c10::nullopt) {
18320 return at::_ops::linalg_svdvals_out::call(A, driver, out);
18321}
18322// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
18323inline at::Tensor & linalg_svdvals_outf(const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {
18324 return at::_ops::linalg_svdvals_out::call(A, driver, out);
18325}
18326
18327// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
18328inline at::Tensor linalg_cond(const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt) {
18329 return at::_ops::linalg_cond::call(self, p);
18330}
18331
18332// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
18333inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt) {
18334 return at::_ops::linalg_cond_out::call(self, p, out);
18335}
18336// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
18337inline at::Tensor & linalg_cond_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {
18338 return at::_ops::linalg_cond_out::call(self, p, out);
18339}
18340
18341// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
18342inline at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p) {
18343 return at::_ops::linalg_cond_p_str::call(self, p);
18344}
18345
18346// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
18347inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p) {
18348 return at::_ops::linalg_cond_p_str_out::call(self, p, out);
18349}
18350// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
18351inline at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out) {
18352 return at::_ops::linalg_cond_p_str_out::call(self, p, out);
18353}
18354
18355// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
18356inline at::Tensor linalg_pinv(const at::Tensor & self, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
18357 return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
18358}
18359
18360// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18361inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
18362 return at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self, atol, rtol, hermitian, out);
18363}
18364// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18365inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
18366 return at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self, atol, rtol, hermitian, out);
18367}
18368
18369// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
18370inline at::Tensor linalg_pinv(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
18371 return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
18372}
18373
18374// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18375inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
18376 return at::_ops::linalg_pinv_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
18377}
18378// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18379inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
18380 return at::_ops::linalg_pinv_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
18381}
18382
18383// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
18384inline at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian=false) {
18385 return at::_ops::linalg_pinv::call(self, rcond, hermitian);
18386}
18387
18388// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
18389inline at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) {
18390 return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
18391}
18392
18393// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18394inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian=false) {
18395 return at::_ops::linalg_pinv_out::call(self, rcond, hermitian, out);
18396}
18397// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18398inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
18399 return at::_ops::linalg_pinv_out::call(self, rcond, hermitian, out);
18400}
18401
18402// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18403inline at::Tensor & linalg_pinv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) {
18404 return at::_ops::linalg_pinv_out_rcond_tensor::call(self, rcond, hermitian, out);
18405}
18406// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18407inline at::Tensor & linalg_pinv_outf(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
18408 return at::_ops::linalg_pinv_out_rcond_tensor::call(self, rcond, hermitian, out);
18409}
18410
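// NOTE (editorial): a hedged sketch for the pseudo-inverse; the atol/rtol
// overloads above are the newer interface, and the plain rcond form shown
// here predates them:
//
//   at::Tensor A = at::randn({5, 3});
//   at::Tensor Ap = at::linalg_pinv(A, /*rcond=*/1e-15, /*hermitian=*/false);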
18411// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
18412inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
18413 return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors);
18414}
18415
18416// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
18417inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out(at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
18418 return at::_ops::_linalg_solve_ex_result::call(A, B, left, check_errors, result, LU, pivots, info);
18419}
18420// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
18421inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
18422 return at::_ops::_linalg_solve_ex_result::call(A, B, left, check_errors, result, LU, pivots, info);
18423}
18424
18425// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
18426inline ::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
18427 return at::_ops::linalg_solve_ex::call(A, B, left, check_errors);
18428}
18429
18430// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
18431inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out(at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
18432 return at::_ops::linalg_solve_ex_out::call(A, B, left, check_errors, result, info);
18433}
18434// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
18435inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
18436 return at::_ops::linalg_solve_ex_out::call(A, B, left, check_errors, result, info);
18437}
18438
18439// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
18440inline at::Tensor linalg_solve(const at::Tensor & A, const at::Tensor & B, bool left=true) {
18441 return at::_ops::linalg_solve::call(A, B, left);
18442}
18443
18444// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
18445inline at::Tensor & linalg_solve_out(at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left=true) {
18446 return at::_ops::linalg_solve_out::call(A, B, left, out);
18447}
18448// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
18449inline at::Tensor & linalg_solve_outf(const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
18450 return at::_ops::linalg_solve_out::call(A, B, left, out);
18451}
18452
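// NOTE (editorial): a hedged sketch for `linalg_solve`; with `left=true`
// (the default) it solves A X = B, with `left=false` it solves X A = B:
//
//   at::Tensor A = at::randn({3, 3});
//   at::Tensor B = at::randn({3, 2});
//   at::Tensor X = at::linalg_solve(A, B);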
18453// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
18454inline at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2) {
18455 return at::_ops::linalg_tensorinv::call(self, ind);
18456}
18457
18458// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
18459inline at::Tensor & linalg_tensorinv_out(at::Tensor & out, const at::Tensor & self, int64_t ind=2) {
18460 return at::_ops::linalg_tensorinv_out::call(self, ind, out);
18461}
18462// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
18463inline at::Tensor & linalg_tensorinv_outf(const at::Tensor & self, int64_t ind, at::Tensor & out) {
18464 return at::_ops::linalg_tensorinv_out::call(self, ind, out);
18465}
18466
18467// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
18468inline at::Tensor linalg_tensorsolve(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=c10::nullopt) {
18469 return at::_ops::linalg_tensorsolve::call(self, other, dims);
18470}
18471
18472// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
18473inline at::Tensor & linalg_tensorsolve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=c10::nullopt) {
18474 return at::_ops::linalg_tensorsolve_out::call(self, other, dims, out);
18475}
18476// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
18477inline at::Tensor & linalg_tensorsolve_outf(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
18478 return at::_ops::linalg_tensorsolve_out::call(self, other, dims, out);
18479}
18480
18481// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
18482inline ::std::tuple<at::Tensor,at::Tensor> linalg_qr(const at::Tensor & A, c10::string_view mode="reduced") {
18483 return at::_ops::linalg_qr::call(A, mode);
18484}
18485
18486// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
18487inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced") {
18488 return at::_ops::linalg_qr_out::call(A, mode, Q, R);
18489}
18490// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
18491inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
18492 return at::_ops::linalg_qr_out::call(A, mode, Q, R);
18493}
18494
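// NOTE (editorial): a hedged sketch for `linalg_qr`; `mode` is one of
// "reduced" (default), "complete", or "r":
//
//   at::Tensor A = at::randn({5, 3});
//   auto [Q, R] = at::linalg_qr(A);  // reduced: Q is {5, 3}, R is {3, 3}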
18495// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
18496inline at::Tensor linalg_matrix_power(const at::Tensor & self, int64_t n) {
18497 return at::_ops::linalg_matrix_power::call(self, n);
18498}
18499
18500// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
18501inline at::Tensor & linalg_matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) {
18502 return at::_ops::linalg_matrix_power_out::call(self, n, out);
18503}
18504// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
18505inline at::Tensor & linalg_matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) {
18506 return at::_ops::linalg_matrix_power_out::call(self, n, out);
18507}
18508
18509// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
18510inline at::Tensor linalg_matrix_rank(const at::Tensor & input, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
18511 return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
18512}
18513
18514// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18515inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
18516 return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input, atol, rtol, hermitian, out);
18517}
18518// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18519inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
18520 return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input, atol, rtol, hermitian, out);
18521}
18522
18523// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
18524inline at::Tensor linalg_matrix_rank(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
18525 return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
18526}
18527
18528// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18529inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
18530 return at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
18531}
18532// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
18533inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
18534 return at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self, atol, rtol, hermitian, out);
18535}
18536
18537// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
18538inline at::Tensor linalg_matrix_rank(const at::Tensor & self, double tol, bool hermitian=false) {
18539 return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
18540}
18541
18542// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18543inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & self, double tol, bool hermitian=false) {
18544 return at::_ops::linalg_matrix_rank_out::call(self, tol, hermitian, out);
18545}
18546// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18547inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
18548 return at::_ops::linalg_matrix_rank_out::call(self, tol, hermitian, out);
18549}
18550
18551// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
18552inline at::Tensor linalg_matrix_rank(const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) {
18553 return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
18554}
18555
18556// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18557inline at::Tensor & linalg_matrix_rank_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) {
18558 return at::_ops::linalg_matrix_rank_out_tol_tensor::call(input, tol, hermitian, out);
18559}
18560// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
18561inline at::Tensor & linalg_matrix_rank_outf(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
18562 return at::_ops::linalg_matrix_rank_out_tol_tensor::call(input, tol, hermitian, out);
18563}
18564
18565// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
18566inline at::Tensor linalg_multi_dot(at::TensorList tensors) {
18567 return at::_ops::linalg_multi_dot::call(tensors);
18568}
18569
18570// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
18571inline at::Tensor & linalg_multi_dot_out(at::Tensor & out, at::TensorList tensors) {
18572 return at::_ops::linalg_multi_dot_out::call(tensors, out);
18573}
18574// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
18575inline at::Tensor & linalg_multi_dot_outf(at::TensorList tensors, at::Tensor & out) {
18576 return at::_ops::linalg_multi_dot_out::call(tensors, out);
18577}
18578
18579// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
18580inline at::Tensor nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
18581 return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
18582}
18583
18584// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
18585inline at::Tensor _test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
18586 return at::_ops::_test_serialization_subcmul::call(self, other, alpha);
18587}
18588
18589// aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
18590inline at::Tensor _test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
18591 return at::_ops::_test_optional_intlist::call(values, addends);
18592}
18593
18594// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
18595inline at::Tensor _test_optional_filled_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends) {
18596 return at::_ops::_test_optional_filled_intlist::call(values, addends);
18597}
18598
18599// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
18600inline at::Tensor _test_optional_floatlist(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
18601 return at::_ops::_test_optional_floatlist::call(values, addends);
18602}
18603
18604// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
18605inline at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\") {
18606 return at::_ops::_test_string_default::call(dummy, a, b);
18607}
18608
18609// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
18610inline at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a=1, int64_t b=1) {
18611 return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b);
18612}
18613
18614// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
18615inline at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, c10::string_view b) {
18616 return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b);
18617}
18618
18619// aten::_test_warn_in_autograd(Tensor self) -> Tensor
18620inline at::Tensor _test_warn_in_autograd(const at::Tensor & self) {
18621 return at::_ops::_test_warn_in_autograd::call(self);
18622}
18623
18624// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
18625inline at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self) {
18626 return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
18627}
18628
18629// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
18630inline at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self, bool b) {
18631 return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
18632}
18633
18634// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
18635inline at::Tensor _test_autograd_multiple_dispatch_view(const at::Tensor & self) {
18636 return at::_ops::_test_autograd_multiple_dispatch_view::call(self);
18637}
18638
18639// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
18640inline at::Tensor _test_autograd_multiple_dispatch_view_copy(const at::Tensor & self) {
18641 return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self);
18642}
18643
18644// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
18645inline at::Tensor segment_reduce(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & indices={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional<at::Scalar> & initial=c10::nullopt) {
18646 return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
18647}
18648
18649// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
18650inline at::Tensor _segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, const c10::optional<at::Scalar> & initial=c10::nullopt) {
18651 return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial);
18652}
18653
18654// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
18655inline at::Tensor pad_sequence(at::TensorList sequences, bool batch_first=false, double padding_value=0.0) {
18656 return at::_ops::pad_sequence::call(sequences, batch_first, padding_value);
18657}
18658
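// NOTE (editorial): a hedged sketch for `pad_sequence`, which pads a list
// of variable-length tensors up to the longest one:
//
//   std::vector<at::Tensor> seqs = {at::randn({3, 4}), at::randn({5, 4})};
//   at::Tensor padded = at::pad_sequence(seqs, /*batch_first=*/true);
//   // padded: {2, 5, 4}; shorter sequences are filled with padding_value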
18659// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
18660inline at::Tensor flatten_dense_tensors(at::TensorList tensors) {
18661 return at::_ops::flatten_dense_tensors::call(tensors);
18662}
18663
18664// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
18665inline ::std::vector<at::Tensor> unflatten_dense_tensors(const at::Tensor & flat, at::TensorList tensors) {
18666 return at::_ops::unflatten_dense_tensors::call(flat, tensors);
18667}
18668
18669// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
18670inline at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt) {
18671 return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory);
18672}
18673
18674// aten::_fw_primal_copy(Tensor self, int level) -> Tensor
18675inline at::Tensor _fw_primal_copy(const at::Tensor & self, int64_t level) {
18676 return at::_ops::_fw_primal_copy::call(self, level);
18677}
18678
18679// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
18680inline at::Tensor _make_dual_copy(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
18681 return at::_ops::_make_dual_copy::call(primal, tangent, level);
18682}
18683
18684// aten::view_as_real_copy(Tensor self) -> Tensor
18685inline at::Tensor view_as_real_copy(const at::Tensor & self) {
18686 return at::_ops::view_as_real_copy::call(self);
18687}
18688
18689// aten::view_as_complex_copy(Tensor self) -> Tensor
18690inline at::Tensor view_as_complex_copy(const at::Tensor & self) {
18691 return at::_ops::view_as_complex_copy::call(self);
18692}
18693
18694// aten::_conj_copy(Tensor self) -> Tensor
18695inline at::Tensor _conj_copy(const at::Tensor & self) {
18696 return at::_ops::_conj_copy::call(self);
18697}
18698
18699// aten::_neg_view_copy(Tensor self) -> Tensor
18700inline at::Tensor _neg_view_copy(const at::Tensor & self) {
18701 return at::_ops::_neg_view_copy::call(self);
18702}
18703
18704// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
18705inline at::Tensor as_strided_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
18706 return at::_ops::as_strided_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
18707}
18708namespace symint {
18709 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18710 at::Tensor as_strided_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
18711 return at::_ops::as_strided_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
18712 }
18713}
18714
18715// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
18716inline at::Tensor as_strided_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
18717 return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
18718}
18719namespace symint {
18720 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18721 at::Tensor as_strided_copy(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
18722 return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
18723 }
18724}
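// NOTE (editorial sketch, `t` is a placeholder tensor): each operator taking
// SymInt arguments is emitted twice above — a plain overload taking int64_t
// values (converted through c10::fromIntArrayRefSlow / c10::SymInt) and a
// `_symint` variant taking c10::SymInt directly. The `at::symint::` namespace
// exposes both under one name; the explicit template argument selects which
// overload is enabled:
//
//   at::symint::as_strided_copy<int64_t>(t, {2, 2}, {2, 1});
//   at::symint::as_strided_copy<c10::SymInt>(t, sym_sizes, sym_strides);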
18725
18726// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
18727inline at::Tensor _sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size) {
18728 return at::_ops::_sparse_broadcast_to_copy::call(self, size);
18729}
18730
18731// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
18732inline at::Tensor diagonal_copy(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
18733 return at::_ops::diagonal_copy::call(self, offset, dim1, dim2);
18734}
18735
18736// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
18737inline at::Tensor expand_copy(const at::Tensor & self, at::IntArrayRef size, bool implicit=false) {
18738 return at::_ops::expand_copy::call(self, c10::fromIntArrayRefSlow(size), implicit);
18739}
18740namespace symint {
18741 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18742 at::Tensor expand_copy(const at::Tensor & self, at::IntArrayRef size, bool implicit=false) {
18743 return at::_ops::expand_copy::call(self, c10::fromIntArrayRefSlow(size), implicit);
18744 }
18745}
18746
18747// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
18748inline at::Tensor expand_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) {
18749 return at::_ops::expand_copy::call(self, size, implicit);
18750}
18751namespace symint {
18752 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18753 at::Tensor expand_copy(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) {
18754 return at::_ops::expand_copy::call(self, size, implicit);
18755 }
18756}
18757
18758// aten::permute_copy(Tensor self, int[] dims) -> Tensor
18759inline at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef dims) {
18760 return at::_ops::permute_copy::call(self, dims);
18761}
18762
18763// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
18764inline at::Tensor _reshape_alias_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
18765 return at::_ops::_reshape_alias_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
18766}
18767namespace symint {
18768 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18769 at::Tensor _reshape_alias_copy(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
18770 return at::_ops::_reshape_alias_copy::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
18771 }
18772}
18773
18774// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
18775inline at::Tensor _reshape_alias_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
18776 return at::_ops::_reshape_alias_copy::call(self, size, stride);
18777}
18778namespace symint {
18779 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18780 at::Tensor _reshape_alias_copy(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
18781 return at::_ops::_reshape_alias_copy::call(self, size, stride);
18782 }
18783}
18784
18785// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
18786inline at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
18787 return at::_ops::select_copy_int::call(self, dim, index);
18788}
18789namespace symint {
18790 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18791 at::Tensor select_copy(const at::Tensor & self, int64_t dim, int64_t index) {
18792 return at::_ops::select_copy_int::call(self, dim, index);
18793 }
18794}
18795
18796// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor
18797inline at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index) {
18798 return at::_ops::select_copy_int::call(self, dim, index);
18799}
18800namespace symint {
18801 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18802 at::Tensor select_copy(const at::Tensor & self, int64_t dim, c10::SymInt index) {
18803 return at::_ops::select_copy_int::call(self, dim, index);
18804 }
18805}
18806
18807// aten::detach_copy(Tensor self) -> Tensor
18808inline at::Tensor detach_copy(const at::Tensor & self) {
18809 return at::_ops::detach_copy::call(self);
18810}
18811
18812// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
18813inline at::Tensor slice_copy(const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
18814 return at::_ops::slice_copy_Tensor::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
18815}
18816namespace symint {
18817 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18818 at::Tensor slice_copy(const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
18819 return at::_ops::slice_copy_Tensor::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
18820 }
18821}
18822
18823// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
18824inline at::Tensor slice_copy_symint(const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
18825 return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
18826}
18827namespace symint {
18828 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18829 at::Tensor slice_copy(const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
18830 return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
18831 }
18832}
18833
18834// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
18835inline ::std::vector<at::Tensor> split_copy(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
18836 return at::_ops::split_copy_Tensor::call(self, split_size, dim);
18837}
18838namespace symint {
18839 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18840 ::std::vector<at::Tensor> split_copy(const at::Tensor & self, int64_t split_size, int64_t dim=0) {
18841 return at::_ops::split_copy_Tensor::call(self, split_size, dim);
18842 }
18843}
18844
18845// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
18846inline ::std::vector<at::Tensor> split_copy_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
18847 return at::_ops::split_copy_Tensor::call(self, split_size, dim);
18848}
18849namespace symint {
18850 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18851 ::std::vector<at::Tensor> split_copy(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
18852 return at::_ops::split_copy_Tensor::call(self, split_size, dim);
18853 }
18854}
18855
18856// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
18857inline ::std::vector<at::Tensor> split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
18858 return at::_ops::split_with_sizes_copy::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
18859}
18860namespace symint {
18861 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18862 ::std::vector<at::Tensor> split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
18863 return at::_ops::split_with_sizes_copy::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
18864 }
18865}
18866
18867// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
18868inline ::std::vector<at::Tensor> split_with_sizes_copy_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
18869 return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
18870}
18871namespace symint {
18872 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18873 ::std::vector<at::Tensor> split_with_sizes_copy(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
18874 return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
18875 }
18876}
18877
18878// aten::squeeze_copy(Tensor self) -> Tensor
18879inline at::Tensor squeeze_copy(const at::Tensor & self) {
18880 return at::_ops::squeeze_copy::call(self);
18881}
18882
18883// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
18884inline at::Tensor squeeze_copy(const at::Tensor & self, int64_t dim) {
18885 return at::_ops::squeeze_copy_dim::call(self, dim);
18886}
18887
18888// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
18889inline at::Tensor squeeze_copy(const at::Tensor & self, at::IntArrayRef dim) {
18890 return at::_ops::squeeze_copy_dims::call(self, dim);
18891}
18892
18893// aten::t_copy(Tensor self) -> Tensor
18894inline at::Tensor t_copy(const at::Tensor & self) {
18895 return at::_ops::t_copy::call(self);
18896}
18897
18898// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
18899inline at::Tensor transpose_copy(const at::Tensor & self, int64_t dim0, int64_t dim1) {
18900 return at::_ops::transpose_copy_int::call(self, dim0, dim1);
18901}
18902
18903// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
18904inline at::Tensor unsqueeze_copy(const at::Tensor & self, int64_t dim) {
18905 return at::_ops::unsqueeze_copy::call(self, dim);
18906}
18907
18908// aten::_indices_copy(Tensor self) -> Tensor
18909inline at::Tensor _indices_copy(const at::Tensor & self) {
18910 return at::_ops::_indices_copy::call(self);
18911}
18912
18913// aten::_values_copy(Tensor self) -> Tensor
18914inline at::Tensor _values_copy(const at::Tensor & self) {
18915 return at::_ops::_values_copy::call(self);
18916}
18917
18918// aten::indices_copy(Tensor self) -> Tensor
18919inline at::Tensor indices_copy(const at::Tensor & self) {
18920 return at::_ops::indices_copy::call(self);
18921}
18922
18923// aten::values_copy(Tensor self) -> Tensor
18924inline at::Tensor values_copy(const at::Tensor & self) {
18925 return at::_ops::values_copy::call(self);
18926}
18927
18928// aten::crow_indices_copy(Tensor self) -> Tensor
18929inline at::Tensor crow_indices_copy(const at::Tensor & self) {
18930 return at::_ops::crow_indices_copy::call(self);
18931}
18932
18933// aten::col_indices_copy(Tensor self) -> Tensor
18934inline at::Tensor col_indices_copy(const at::Tensor & self) {
18935 return at::_ops::col_indices_copy::call(self);
18936}
18937
18938// aten::ccol_indices_copy(Tensor self) -> Tensor
18939inline at::Tensor ccol_indices_copy(const at::Tensor & self) {
18940 return at::_ops::ccol_indices_copy::call(self);
18941}
18942
18943// aten::row_indices_copy(Tensor self) -> Tensor
18944inline at::Tensor row_indices_copy(const at::Tensor & self) {
18945 return at::_ops::row_indices_copy::call(self);
18946}
18947
18948// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
18949inline ::std::vector<at::Tensor> unbind_copy(const at::Tensor & self, int64_t dim=0) {
18950 return at::_ops::unbind_copy_int::call(self, dim);
18951}
18952
18953// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
18954inline void unbind_copy_out(at::TensorList out, const at::Tensor & self, int64_t dim=0) {
18955 return at::_ops::unbind_copy_int_out::call(self, dim, out);
18956}
18957// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
18958inline void unbind_copy_outf(const at::Tensor & self, int64_t dim, at::TensorList out) {
18959 return at::_ops::unbind_copy_int_out::call(self, dim, out);
18960}
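// NOTE (editorial, describing the pattern visible above; `self` and `bufs`
// are placeholders): the generated `_out` wrappers put the output argument
// first, while the `_outf` wrappers keep the schema order with `out`
// trailing; both forward to the same at::_ops::*_out::call:
//
//   at::unbind_copy_out(bufs, self, /*dim=*/0);   // out leads
//   at::unbind_copy_outf(self, /*dim=*/0, bufs);  // out trails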
18961
18962// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
18963inline void split_copy_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
18964 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
18965}
18966namespace symint {
18967 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18968 void split_copy_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
18969 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
18970 }
18971}
18972
18973// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
18974inline void split_copy_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
18975 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
18976}
18977namespace symint {
18978 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
18979 void split_copy_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
18980 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
18981 }
18982}
18983
18984// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
18985inline void split_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
18986 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
18987}
18988namespace symint {
18989 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
18990 void split_copy_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
18991 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
18992 }
18993}
18994
18995// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
18996inline void split_copy_symint_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
18997 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
18998}
18999namespace symint {
19000 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19001 void split_copy_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
19002 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
19003 }
19004}
19005
19006// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
19007inline void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
19008 return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
19009}
19010namespace symint {
19011 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19012 void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
19013 return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
19014 }
19015}
19016
19017// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
19018inline void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
19019 return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
19020}
19021namespace symint {
19022 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19023 void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
19024 return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
19025 }
19026}
19027
19028// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
19029inline void split_with_sizes_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
19030 return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
19031}
19032namespace symint {
19033 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19034 void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
19035 return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
19036 }
19037}
19038
19039// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
19040inline void split_with_sizes_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
19041 return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
19042}
19043namespace symint {
19044 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19045 void split_with_sizes_copy_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
19046 return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
19047 }
19048}
19049
19050// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
19051inline at::Tensor view_copy(const at::Tensor & self, at::IntArrayRef size) {
19052 return at::_ops::view_copy::call(self, c10::fromIntArrayRefSlow(size));
19053}
19054namespace symint {
19055 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19056 at::Tensor view_copy(const at::Tensor & self, at::IntArrayRef size) {
19057 return at::_ops::view_copy::call(self, c10::fromIntArrayRefSlow(size));
19058 }
19059}
19060
19061// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
19062inline at::Tensor view_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
19063 return at::_ops::view_copy::call(self, size);
19064}
19065namespace symint {
19066 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19067 at::Tensor view_copy(const at::Tensor & self, c10::SymIntArrayRef size) {
19068 return at::_ops::view_copy::call(self, size);
19069 }
19070}
19071
19072// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
19073inline at::Tensor view_copy(const at::Tensor & self, at::ScalarType dtype) {
19074 return at::_ops::view_copy_dtype::call(self, dtype);
19075}
19076
19077// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
19078inline at::Tensor unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
19079 return at::_ops::unfold_copy::call(self, dimension, size, step);
19080}
19081
19082// aten::alias_copy(Tensor self) -> Tensor
19083inline at::Tensor alias_copy(const at::Tensor & self) {
19084 return at::_ops::alias_copy::call(self);
19085}
19086
19087namespace symint {
19088 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19089 at::Tensor to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
19090 return at::_ops::to_padded_tensor::call(self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt);
19091 }
19092}
19093
19094namespace symint {
19095 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19096 at::Tensor to_padded_tensor(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt) {
19097 return at::_ops::to_padded_tensor::call(self, padding, output_size);
19098 }
19099}
19100
19101// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
19102inline at::Tensor _nested_tensor_softmax_with_shape(const at::Tensor & self, const at::Tensor & query) {
19103 return at::_ops::_nested_tensor_softmax_with_shape::call(self, query);
19104}
19105
19106// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
19107inline at::Tensor _transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt) {
19108 return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
19109}
19110
19111// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
19112inline ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional<int64_t> mask_type=c10::nullopt) {
19113 return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
19114}
19115
19116// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor
19117inline at::Tensor scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false) {
19118 return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal);
19119}
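// Example (illustrative sketch, not generated text; shapes follow the usual
// [batch, heads, seq_len, head_dim] convention):
//
//   at::Tensor q = at::randn({2, 8, 16, 64});
//   at::Tensor k = at::randn({2, 8, 16, 64});
//   at::Tensor v = at::randn({2, 8, 16, 64});
//   at::Tensor o = at::scaled_dot_product_attention(
//       q, k, v, /*attn_mask=*/{}, /*dropout_p=*/0.0, /*is_causal=*/true);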
19120
19121// aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
19122inline ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool need_attn_weights=false, bool is_causal=false) {
19123 return at::_ops::_scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
19124}
19125
19126// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int
19127inline int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false) {
19128 return at::_ops::_fused_sdp_choice::call(query, key, value, attn_mask, dropout_p, is_causal);
19129}
19130
19131// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None) -> (Tensor, Tensor)
19132inline ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, const c10::optional<at::Tensor> & dropout_mask={}) {
19133 return at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
19134}
19135
19136// aten::_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, int philox_seed, int philox_offset, Tensor debug_attn_mask)
19137inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t,int64_t,int64_t,int64_t,at::Tensor> _scaled_dot_product_flash_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false) {
19138 return at::_ops::_scaled_dot_product_flash_attention::call(query, key, value, dropout_p, is_causal, return_debug_mask);
19139}
19140
19141// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)
19142inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
19143 return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
19144}
19145
19146// aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, bool compute_log_sumexp, bool is_causal=False) -> (Tensor, Tensor)
19147inline ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool compute_log_sumexp, bool is_causal=false) {
19148 return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, compute_log_sumexp, is_causal);
19149}
19150
19151// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
19152inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal=false, bool chunk_grad_outputs=false) {
19153 return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
19154}
19155
19156// aten::_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool
19157inline bool _chunk_grad_outputs_efficient_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool is_causal=false) {
19158 return at::_ops::_chunk_grad_outputs_efficient_attention::call(query, key, value, is_causal);
19159}
19160
19161// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
19162inline ::std::tuple<at::Tensor,at::Tensor,int64_t,int64_t,at::Tensor> _flash_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask) {
19163 return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask);
19164}
19165
19166// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
19167inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
19168 return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
19169}
19170
19171// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor)
19172inline ::std::tuple<at::Tensor,at::Tensor> _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, bool compute_log_sumexp=false, bool causal=false) {
19173 return at::_ops::_efficient_attention_forward::call(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal);
19174}
19175
19176// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor)
19177inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal=false, bool chunk_grad_outputs=false) {
19178 return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
19179}
19180
19181// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
19182inline at::Tensor _triton_scaled_dot_attention(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) {
19183 return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
19184}
19185
19186// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
19187inline at::Tensor _triton_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}) {
19188 return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
19189}
19190
19191// aten::special_airy_ai(Tensor x) -> Tensor
19192inline at::Tensor special_airy_ai(const at::Tensor & x) {
19193 return at::_ops::special_airy_ai::call(x);
19194}
19195
19196// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19197inline at::Tensor & special_airy_ai_out(at::Tensor & out, const at::Tensor & x) {
19198 return at::_ops::special_airy_ai_out::call(x, out);
19199}
19200// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19201inline at::Tensor & special_airy_ai_outf(const at::Tensor & x, at::Tensor & out) {
19202 return at::_ops::special_airy_ai_out::call(x, out);
19203}
19204
19205// aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
19206inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}) {
19207 return at::_ops::_transformer_decoder_only_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
19208}
19209
19210// aten::_native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor)
19211inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_decoder_only_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}, bool need_weights=true, bool average_attn_weights=true) {
19212 return at::_ops::_native_decoder_only_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
19213}
19214
19215// aten::special_bessel_j0(Tensor self) -> Tensor
19216inline at::Tensor special_bessel_j0(const at::Tensor & self) {
19217 return at::_ops::special_bessel_j0::call(self);
19218}
19219
19220// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19221inline at::Tensor & special_bessel_j0_out(at::Tensor & out, const at::Tensor & self) {
19222 return at::_ops::special_bessel_j0_out::call(self, out);
19223}
19224// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19225inline at::Tensor & special_bessel_j0_outf(const at::Tensor & self, at::Tensor & out) {
19226 return at::_ops::special_bessel_j0_out::call(self, out);
19227}
19228
19229// aten::special_bessel_j1(Tensor self) -> Tensor
19230inline at::Tensor special_bessel_j1(const at::Tensor & self) {
19231 return at::_ops::special_bessel_j1::call(self);
19232}
19233
19234// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19235inline at::Tensor & special_bessel_j1_out(at::Tensor & out, const at::Tensor & self) {
19236 return at::_ops::special_bessel_j1_out::call(self, out);
19237}
19238// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19239inline at::Tensor & special_bessel_j1_outf(const at::Tensor & self, at::Tensor & out) {
19240 return at::_ops::special_bessel_j1_out::call(self, out);
19241}
19242
19243// aten::special_bessel_y0(Tensor self) -> Tensor
19244inline at::Tensor special_bessel_y0(const at::Tensor & self) {
19245 return at::_ops::special_bessel_y0::call(self);
19246}
19247
19248// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19249inline at::Tensor & special_bessel_y0_out(at::Tensor & out, const at::Tensor & self) {
19250 return at::_ops::special_bessel_y0_out::call(self, out);
19251}
19252// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19253inline at::Tensor & special_bessel_y0_outf(const at::Tensor & self, at::Tensor & out) {
19254 return at::_ops::special_bessel_y0_out::call(self, out);
19255}
19256
19257// aten::special_bessel_y1(Tensor self) -> Tensor
19258inline at::Tensor special_bessel_y1(const at::Tensor & self) {
19259 return at::_ops::special_bessel_y1::call(self);
19260}
19261
19262// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19263inline at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self) {
19264 return at::_ops::special_bessel_y1_out::call(self, out);
19265}
19266// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19267inline at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out) {
19268 return at::_ops::special_bessel_y1_out::call(self, out);
19269}
19270
19271// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
19272inline at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
19273 return at::_ops::special_chebyshev_polynomial_t::call(x, n);
19274}
19275
19276// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
19277inline at::Tensor special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
19278 return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n);
19279}
19280
19281// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
19282inline at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
19283 return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n);
19284}
19285
19286// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19287inline at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19288 return at::_ops::special_chebyshev_polynomial_t_out::call(x, n, out);
19289}
19290// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19291inline at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19292 return at::_ops::special_chebyshev_polynomial_t_out::call(x, n, out);
19293}
19294
19295// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19296inline at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19297 return at::_ops::special_chebyshev_polynomial_t_x_scalar_out::call(x, n, out);
19298}
19299// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19300inline at::Tensor & special_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19301 return at::_ops::special_chebyshev_polynomial_t_x_scalar_out::call(x, n, out);
19302}
19303
19304// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19305inline at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19306 return at::_ops::special_chebyshev_polynomial_t_n_scalar_out::call(x, n, out);
19307}
19308// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19309inline at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19310 return at::_ops::special_chebyshev_polynomial_t_n_scalar_out::call(x, n, out);
19311}
19312
19313// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
19314inline at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
19315 return at::_ops::special_chebyshev_polynomial_u::call(x, n);
19316}
19317
19318// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
19319inline at::Tensor special_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
19320 return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
19321}
19322
19323// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
19324inline at::Tensor special_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
19325 return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
19326}
19327
19328// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19329inline at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19330 return at::_ops::special_chebyshev_polynomial_u_out::call(x, n, out);
19331}
19332// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19333inline at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19334 return at::_ops::special_chebyshev_polynomial_u_out::call(x, n, out);
19335}
19336
19337// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19338inline at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19339 return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
19340}
19341// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19342inline at::Tensor & special_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19343 return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
19344}
19345
19346// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19347inline at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19348 return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
19349}
19350// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19351inline at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19352 return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
19353}
19354
19355// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
19356inline at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
19357 return at::_ops::special_chebyshev_polynomial_v::call(x, n);
19358}
19359
19360// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
19361inline at::Tensor special_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
19362 return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n);
19363}
19364
19365// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
19366inline at::Tensor special_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
19367 return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n);
19368}
19369
19370// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19371inline at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19372 return at::_ops::special_chebyshev_polynomial_v_out::call(x, n, out);
19373}
19374// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19375inline at::Tensor & special_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19376 return at::_ops::special_chebyshev_polynomial_v_out::call(x, n, out);
19377}
19378
19379// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19380inline at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19381 return at::_ops::special_chebyshev_polynomial_v_x_scalar_out::call(x, n, out);
19382}
19383// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19384inline at::Tensor & special_chebyshev_polynomial_v_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19385 return at::_ops::special_chebyshev_polynomial_v_x_scalar_out::call(x, n, out);
19386}
19387
19388// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19389inline at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19390 return at::_ops::special_chebyshev_polynomial_v_n_scalar_out::call(x, n, out);
19391}
19392// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19393inline at::Tensor & special_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19394 return at::_ops::special_chebyshev_polynomial_v_n_scalar_out::call(x, n, out);
19395}
19396
19397// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
19398inline at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
19399 return at::_ops::special_chebyshev_polynomial_w::call(x, n);
19400}
19401
19402// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
19403inline at::Tensor special_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
19404 return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n);
19405}
19406
19407// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
19408inline at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
19409 return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n);
19410}
19411
19412// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19413inline at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19414 return at::_ops::special_chebyshev_polynomial_w_out::call(x, n, out);
19415}
19416// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19417inline at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19418 return at::_ops::special_chebyshev_polynomial_w_out::call(x, n, out);
19419}
19420
19421// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19422inline at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19423 return at::_ops::special_chebyshev_polynomial_w_x_scalar_out::call(x, n, out);
19424}
19425// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19426inline at::Tensor & special_chebyshev_polynomial_w_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19427 return at::_ops::special_chebyshev_polynomial_w_x_scalar_out::call(x, n, out);
19428}
19429
19430// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19431inline at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19432 return at::_ops::special_chebyshev_polynomial_w_n_scalar_out::call(x, n, out);
19433}
19434// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19435inline at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19436 return at::_ops::special_chebyshev_polynomial_w_n_scalar_out::call(x, n, out);
19437}
19438
19439// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
19440inline at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n) {
19441 return at::_ops::special_hermite_polynomial_h::call(x, n);
19442}
19443
19444// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
19445inline at::Tensor special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n) {
19446 return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n);
19447}
19448
19449// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
19450inline at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n) {
19451 return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n);
19452}
19453
19454// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19455inline at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19456 return at::_ops::special_hermite_polynomial_h_out::call(x, n, out);
19457}
19458// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19459inline at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19460 return at::_ops::special_hermite_polynomial_h_out::call(x, n, out);
19461}
19462
19463// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19464inline at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19465 return at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n, out);
19466}
19467// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19468inline at::Tensor & special_hermite_polynomial_h_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19469 return at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n, out);
19470}
19471
19472// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19473inline at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19474 return at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x, n, out);
19475}
19476// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19477inline at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19478 return at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x, n, out);
19479}
19480
19481// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
19482inline at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n) {
19483 return at::_ops::special_hermite_polynomial_he::call(x, n);
19484}
19485
19486// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
19487inline at::Tensor special_hermite_polynomial_he(const at::Scalar & x, const at::Tensor & n) {
19488 return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
19489}
19490
19491// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
19492inline at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Scalar & n) {
19493 return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
19494}
19495
19496// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19497inline at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19498 return at::_ops::special_hermite_polynomial_he_out::call(x, n, out);
19499}
19500// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19501inline at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19502 return at::_ops::special_hermite_polynomial_he_out::call(x, n, out);
19503}
19504
19505// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19506inline at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19507 return at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n, out);
19508}
19509// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19510inline at::Tensor & special_hermite_polynomial_he_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19511 return at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n, out);
19512}
19513
19514// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19515inline at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19516 return at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x, n, out);
19517}
19518// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19519inline at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19520 return at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x, n, out);
19521}
19522
19523// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
19524inline at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Tensor & n) {
19525 return at::_ops::special_laguerre_polynomial_l::call(x, n);
19526}
19527
19528// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
19529inline at::Tensor special_laguerre_polynomial_l(const at::Scalar & x, const at::Tensor & n) {
19530 return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n);
19531}
19532
19533// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
19534inline at::Tensor special_laguerre_polynomial_l(const at::Tensor & x, const at::Scalar & n) {
19535 return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n);
19536}
19537
19538// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19539inline at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19540 return at::_ops::special_laguerre_polynomial_l_out::call(x, n, out);
19541}
19542// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19543inline at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19544 return at::_ops::special_laguerre_polynomial_l_out::call(x, n, out);
19545}
19546
19547// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19548inline at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19549 return at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n, out);
19550}
19551// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19552inline at::Tensor & special_laguerre_polynomial_l_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19553 return at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n, out);
19554}
19555
19556// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19557inline at::Tensor & special_laguerre_polynomial_l_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19558 return at::_ops::special_laguerre_polynomial_l_n_scalar_out::call(x, n, out);
19559}
19560// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19561inline at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19562 return at::_ops::special_laguerre_polynomial_l_n_scalar_out::call(x, n, out);
19563}
19564
19565// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
19566inline at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n) {
19567 return at::_ops::special_legendre_polynomial_p::call(x, n);
19568}
19569
19570// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
19571inline at::Tensor special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n) {
19572 return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n);
19573}
19574
19575// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
19576inline at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n) {
19577 return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n);
19578}
19579
19580// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19581inline at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19582 return at::_ops::special_legendre_polynomial_p_out::call(x, n, out);
19583}
19584// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19585inline at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19586 return at::_ops::special_legendre_polynomial_p_out::call(x, n, out);
19587}
19588
19589// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19590inline at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19591 return at::_ops::special_legendre_polynomial_p_x_scalar_out::call(x, n, out);
19592}
19593// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19594inline at::Tensor & special_legendre_polynomial_p_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19595 return at::_ops::special_legendre_polynomial_p_x_scalar_out::call(x, n, out);
19596}
19597
19598// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19599inline at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19600 return at::_ops::special_legendre_polynomial_p_n_scalar_out::call(x, n, out);
19601}
19602// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19603inline at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19604 return at::_ops::special_legendre_polynomial_p_n_scalar_out::call(x, n, out);
19605}
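//
// Editorial note (not part of the generated file): every `_out` function in
// this header takes the output tensor first, while the matching `_outf`
// function keeps the operator schema order with `out` last; both dispatch to
// the same op. A minimal sketch with illustrative values:
//
//   at::Tensor x   = at::rand({8});
//   at::Tensor n   = at::full({8}, 2.0);
//   at::Tensor out = at::empty_like(x);
//   at::special_legendre_polynomial_p_out(out, x, n);   // out-first variant
//   at::special_legendre_polynomial_p_outf(x, n, out);  // schema-order variant
//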
19606
19607// aten::special_modified_bessel_i0(Tensor self) -> Tensor
19608inline at::Tensor special_modified_bessel_i0(const at::Tensor & self) {
19609 return at::_ops::special_modified_bessel_i0::call(self);
19610}
19611
19612// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19613inline at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self) {
19614 return at::_ops::special_modified_bessel_i0_out::call(self, out);
19615}
19616// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19617inline at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out) {
19618 return at::_ops::special_modified_bessel_i0_out::call(self, out);
19619}
19620
19621// aten::special_modified_bessel_i1(Tensor self) -> Tensor
19622inline at::Tensor special_modified_bessel_i1(const at::Tensor & self) {
19623 return at::_ops::special_modified_bessel_i1::call(self);
19624}
19625
19626// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19627inline at::Tensor & special_modified_bessel_i1_out(at::Tensor & out, const at::Tensor & self) {
19628 return at::_ops::special_modified_bessel_i1_out::call(self, out);
19629}
19630// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19631inline at::Tensor & special_modified_bessel_i1_outf(const at::Tensor & self, at::Tensor & out) {
19632 return at::_ops::special_modified_bessel_i1_out::call(self, out);
19633}
19634
19635// aten::special_modified_bessel_k0(Tensor self) -> Tensor
19636inline at::Tensor special_modified_bessel_k0(const at::Tensor & self) {
19637 return at::_ops::special_modified_bessel_k0::call(self);
19638}
19639
19640// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19641inline at::Tensor & special_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & self) {
19642 return at::_ops::special_modified_bessel_k0_out::call(self, out);
19643}
19644// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19645inline at::Tensor & special_modified_bessel_k0_outf(const at::Tensor & self, at::Tensor & out) {
19646 return at::_ops::special_modified_bessel_k0_out::call(self, out);
19647}
19648
19649// aten::special_modified_bessel_k1(Tensor self) -> Tensor
19650inline at::Tensor special_modified_bessel_k1(const at::Tensor & self) {
19651 return at::_ops::special_modified_bessel_k1::call(self);
19652}
19653
19654// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19655inline at::Tensor & special_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & self) {
19656 return at::_ops::special_modified_bessel_k1_out::call(self, out);
19657}
19658// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
19659inline at::Tensor & special_modified_bessel_k1_outf(const at::Tensor & self, at::Tensor & out) {
19660 return at::_ops::special_modified_bessel_k1_out::call(self, out);
19661}
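//
// Editorial example (not part of the generated file): the modified Bessel
// routines above are plain elementwise maps; a minimal sketch with
// illustrative inputs:
//
//   at::Tensor v  = at::linspace(0.1, 5.0, 50);
//   at::Tensor i0 = at::special_modified_bessel_i0(v);
//   at::Tensor k1 = at::special_modified_bessel_k1(v);
//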
19662
19663// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
19664inline at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x) {
19665 return at::_ops::special_scaled_modified_bessel_k0::call(x);
19666}
19667
19668// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19669inline at::Tensor & special_scaled_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & x) {
19670 return at::_ops::special_scaled_modified_bessel_k0_out::call(x, out);
19671}
19672// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19673inline at::Tensor & special_scaled_modified_bessel_k0_outf(const at::Tensor & x, at::Tensor & out) {
19674 return at::_ops::special_scaled_modified_bessel_k0_out::call(x, out);
19675}
19676
19677// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
19678inline at::Tensor special_scaled_modified_bessel_k1(const at::Tensor & x) {
19679 return at::_ops::special_scaled_modified_bessel_k1::call(x);
19680}
19681
19682// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19683inline at::Tensor & special_scaled_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & x) {
19684 return at::_ops::special_scaled_modified_bessel_k1_out::call(x, out);
19685}
19686// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19687inline at::Tensor & special_scaled_modified_bessel_k1_outf(const at::Tensor & x, at::Tensor & out) {
19688 return at::_ops::special_scaled_modified_bessel_k1_out::call(x, out);
19689}
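//
// Editorial note (not part of the generated file): the scaled variants
// compute exp(x) * K0(x) and exp(x) * K1(x) (compare SciPy's k0e/k1e),
// which stay well-conditioned where the unscaled K functions underflow.
// A sketch with illustrative values:
//
//   at::Tensor x = at::linspace(1.0, 20.0, 20);
//   at::Tensor a = at::special_scaled_modified_bessel_k0(x);
//   at::Tensor b = at::exp(x) * at::special_modified_bessel_k0(x);  // ~equal for moderate x
//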
19690
19691// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
19692inline at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n) {
19693 return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n);
19694}
19695
19696// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
19697inline at::Tensor special_shifted_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n) {
19698 return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n);
19699}
19700
19701// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
19702inline at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n) {
19703 return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n);
19704}
19705
19706// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19707inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19708 return at::_ops::special_shifted_chebyshev_polynomial_t_out::call(x, n, out);
19709}
19710// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19711inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19712 return at::_ops::special_shifted_chebyshev_polynomial_t_out::call(x, n, out);
19713}
19714
19715// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19716inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19717 return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::call(x, n, out);
19718}
19719// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19720inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19721 return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::call(x, n, out);
19722}
19723
19724// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19725inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19726 return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::call(x, n, out);
19727}
19728// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19729inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19730 return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::call(x, n, out);
19731}
19732
19733// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
19734inline at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n) {
19735 return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
19736}
19737
19738// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
19739inline at::Tensor special_shifted_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n) {
19740 return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n);
19741}
19742
19743// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
19744inline at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n) {
19745 return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n);
19746}
19747
19748// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19749inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19750 return at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x, n, out);
19751}
19752// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19753inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19754 return at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x, n, out);
19755}
19756
19757// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19758inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19759 return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
19760}
19761// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19762inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19763 return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n, out);
19764}
19765
19766// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19767inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19768 return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
19769}
19770// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19771inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19772 return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x, n, out);
19773}
19774
19775// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
19776inline at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n) {
19777 return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n);
19778}
19779
19780// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
19781inline at::Tensor special_shifted_chebyshev_polynomial_v(const at::Scalar & x, const at::Tensor & n) {
19782 return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n);
19783}
19784
19785// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
19786inline at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Scalar & n) {
19787 return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n);
19788}
19789
19790// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19791inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19792 return at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x, n, out);
19793}
19794// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19795inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19796 return at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x, n, out);
19797}
19798
19799// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19800inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19801 return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n, out);
19802}
19803// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19804inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19805 return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n, out);
19806}
19807
19808// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19809inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19810 return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x, n, out);
19811}
19812// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19813inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19814 return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x, n, out);
19815}
19816
19817// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
19818inline at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n) {
19819 return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n);
19820}
19821
19822// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
19823inline at::Tensor special_shifted_chebyshev_polynomial_w(const at::Scalar & x, const at::Tensor & n) {
19824 return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n);
19825}
19826
19827// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
19828inline at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Scalar & n) {
19829 return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n);
19830}
19831
19832// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19833inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
19834 return at::_ops::special_shifted_chebyshev_polynomial_w_out::call(x, n, out);
19835}
19836// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19837inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
19838 return at::_ops::special_shifted_chebyshev_polynomial_w_out::call(x, n, out);
19839}
19840
19841// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19842inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
19843 return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::call(x, n, out);
19844}
19845// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
19846inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
19847 return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::call(x, n, out);
19848}
19849
19850// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19851inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
19852 return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::call(x, n, out);
19853}
19854// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
19855inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
19856 return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::call(x, n, out);
19857}
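//
// Editorial example (not part of the generated file): the shifted Chebyshev
// polynomials are defined on [0, 1], and the Tensor/Tensor overloads follow
// the usual elementwise broadcasting rules (an assumption worth verifying
// for your version). A sketch evaluating degrees 0..2 at four points:
//
//   at::Tensor x = at::rand({4});                                     // in [0, 1)
//   at::Tensor n = at::arange(3, at::kFloat).unsqueeze(1);            // degrees as a column
//   at::Tensor t = at::special_shifted_chebyshev_polynomial_t(x, n);  // shape {3, 4}
//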
19858
19859// aten::special_spherical_bessel_j0(Tensor x) -> Tensor
19860inline at::Tensor special_spherical_bessel_j0(const at::Tensor & x) {
19861 return at::_ops::special_spherical_bessel_j0::call(x);
19862}
19863
19864// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19865inline at::Tensor & special_spherical_bessel_j0_out(at::Tensor & out, const at::Tensor & x) {
19866 return at::_ops::special_spherical_bessel_j0_out::call(x, out);
19867}
19868// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
19869inline at::Tensor & special_spherical_bessel_j0_outf(const at::Tensor & x, at::Tensor & out) {
19870 return at::_ops::special_spherical_bessel_j0_out::call(x, out);
19871}
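//
// Editorial example (not part of the generated file): the order-zero
// spherical Bessel function has the closed form j0(x) = sin(x) / x, which
// gives a quick sanity check; values are illustrative:
//
//   at::Tensor x   = at::linspace(0.5, 10.0, 32);
//   at::Tensor j0  = at::special_spherical_bessel_j0(x);
//   at::Tensor ref = at::sin(x) / x;  // should match j0 to float tolerance
//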
19872
19873// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
19874inline at::Tensor _foobar(const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) {
19875 return at::_ops::_foobar::call(self, arg1, arg2, arg3);
19876}
19877
19878// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
19879inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
19880 return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
19881}
19882
19883// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
19884inline void _fused_adamw_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
19885 return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
19886}
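//
// Editorial sketch (not part of the generated file): `_fused_adam_` and
// `_fused_adamw_` update whole parameter lists in place. Real training code
// reaches them through torch.optim with fused=True, and the fused kernels
// carry device/dtype requirements not shown here, so this call shape is
// only illustrative; amsgrad=false keeps the max_exp_avg_sqs list empty:
//
//   at::Tensor p    = at::zeros({10});
//   at::Tensor g    = at::ones({10});
//   at::Tensor m    = at::zeros({10});  // exp_avg
//   at::Tensor v    = at::zeros({10});  // exp_avg_sq
//   at::Tensor step = at::zeros({});    // state step counter
//   at::_fused_adam_({p}, {g}, {m}, {v}, {}, {step},
//                    /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
//                    /*weight_decay=*/0.0, /*eps=*/1e-8,
//                    /*amsgrad=*/false, /*maximize=*/false);
//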
19887
19888// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
19889inline at::Tensor & _new_zeros_with_same_feature_meta_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
19890 return at::_ops::_new_zeros_with_same_feature_meta_out::call(self, other, self_num_batch_dims, out);
19891}
19892// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)
19893inline at::Tensor & _new_zeros_with_same_feature_meta_outf(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
19894 return at::_ops::_new_zeros_with_same_feature_meta_out::call(self, other, self_num_batch_dims, out);
19895}
19896
19897// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
19898inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
19899 return at::_ops::_cudnn_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
19900}
19901// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
19902inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
19903 return at::_ops::_cudnn_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
19904}
19905
19906// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
19907inline at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
19908 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19909}
19910namespace symint {
19911 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19912 at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
19913 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19914 }
19915}
19916
19917// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
19918inline at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
19919 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19920}
19921namespace symint {
19922 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19923 at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
19924 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19925 }
19926}
19927
19928// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
19929inline at::Tensor & _cudnn_rnn_flatten_weight_symint_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
19930 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19931}
19932namespace symint {
19933 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19934 at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
19935 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19936 }
19937}
19938
19939// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)
19940inline at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
19941 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19942}
19943namespace symint {
19944 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19945 at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
19946 return at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out);
19947 }
19948}
19949
19950// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
19951inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
19952 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4);
19953}
19954namespace symint {
19955 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19956 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
19957 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4);
19958 }
19959}
19960
19961// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
19962inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
19963 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4);
19964}
19965namespace symint {
19966 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
19967 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
19968 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, out0, out1, out2, out3, out4);
19969 }
19970}
19971
19972// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
19973inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
19974 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
19975}
19976namespace symint {
19977 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19978 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
19979 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
19980 }
19981}
19982
19983// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
19984inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
19985 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
19986}
19987namespace symint {
19988 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
19989 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
19990 return at::_ops::_cudnn_rnn_out::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
19991 }
19992}
19993
19994// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
19995inline void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
19996 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
19997}
19998namespace symint {
19999 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20000 void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
20001 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
20002 }
20003}
20004
20005// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
20006inline void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
20007 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
20008}
20009namespace symint {
20010 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20011 void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
20012 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRefSlow(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3);
20013 }
20014}
20015
20016// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
20017inline void _cudnn_rnn_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
20018 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
20019}
20020namespace symint {
20021 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20022 void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
20023 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
20024 }
20025}
20026
20027// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
20028inline void _cudnn_rnn_backward_symint_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
20029 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
20030}
20031namespace symint {
20032 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20033 void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
20034 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
20035 }
20036}
20037
20038// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
20039inline at::Tensor & _cudnn_init_dropout_state_out(at::Tensor & out, double dropout, bool train, int64_t dropout_seed) {
20040 return at::_ops::_cudnn_init_dropout_state_out::call(dropout, train, dropout_seed, out);
20041}
20042// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)
20043inline at::Tensor & _cudnn_init_dropout_state_outf(double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
20044 return at::_ops::_cudnn_init_dropout_state_out::call(dropout, train, dropout_seed, out);
20045}
20046
20047// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20048inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
20049 return at::_ops::_fused_dropout_out::call(self, p, generator, out0, out1);
20050}
20051// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20052inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_outf(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {
20053 return at::_ops::_fused_dropout_out::call(self, p, generator, out0, out1);
20054}
20055
20056// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
20057inline at::Tensor & _masked_scale_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, double scale) {
20058 return at::_ops::_masked_scale_out::call(self, mask, scale, out);
20059}
20060// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
20061inline at::Tensor & _masked_scale_outf(const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
20062 return at::_ops::_masked_scale_out::call(self, mask, scale, out);
20063}
20064
20065// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20066inline ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double p, c10::optional<bool> train) {
20067 return at::_ops::native_dropout_out::call(input, p, train, out0, out1);
20068}
20069// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20070inline ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_outf(const at::Tensor & input, double p, c10::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
20071 return at::_ops::native_dropout_out::call(input, p, train, out0, out1);
20072}
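//
// Editorial sketch (not part of the generated file): the `.out` functions in
// this back half of the header are generated companions of the functional
// ops; `native_dropout.out` fills both the dropped output and the boolean
// keep-mask. Assuming the op may resize the provided out tensors (worth
// verifying for your version), a minimal sketch:
//
//   at::Tensor x    = at::rand({4, 4});
//   at::Tensor out0 = at::empty({0});
//   at::Tensor out1 = at::empty({0}, at::kBool);
//   at::native_dropout_out(out0, out1, x, /*p=*/0.5, /*train=*/true);
//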
20073
20074// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
20075inline at::Tensor & native_dropout_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
20076 return at::_ops::native_dropout_backward_out::call(grad_output, mask, scale, out);
20077}
20078// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)
20079inline at::Tensor & native_dropout_backward_outf(const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {
20080 return at::_ops::native_dropout_backward_out::call(grad_output, mask, scale, out);
20081}
20082
20083// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
20084inline at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self) {
20085 return at::_ops::_conj_physical_out::call(self, out);
20086}
20087// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
20088inline at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
20089 return at::_ops::_conj_physical_out::call(self, out);
20090}
20091
20092// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
20093inline at::Tensor & _add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
20094 return at::_ops::_add_relu_Scalar_out::call(self, other, alpha, out);
20095}
20096// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
20097inline at::Tensor & _add_relu_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
20098 return at::_ops::_add_relu_Scalar_out::call(self, other, alpha, out);
20099}
20100
20101// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
20102inline at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
20103 return at::_ops::add_Scalar_out::call(self, other, alpha, out);
20104}
20105// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
20106inline at::Tensor & add_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
20107 return at::_ops::add_Scalar_out::call(self, other, alpha, out);
20108}
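//
// Editorial example (not part of the generated file): `add.Scalar_out`
// computes out = self + alpha * other with a Scalar `other`; values are
// illustrative:
//
//   at::Tensor self = at::ones({3});
//   at::Tensor out  = at::empty({3});
//   at::add_out(out, self, /*other=*/2, /*alpha=*/10);  // out == 21 everywhere
//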
20109
20110// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
20111inline at::Tensor & affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
20112 return at::_ops::affine_grid_generator_out::call(theta, size, align_corners, out);
20113}
20114// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
20115inline at::Tensor & affine_grid_generator_outf(const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) {
20116 return at::_ops::affine_grid_generator_out::call(theta, size, align_corners, out);
20117}
20118
20119// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
20120inline at::Tensor & bartlett_window_out(at::Tensor & out, int64_t window_length) {
20121 return at::_ops::bartlett_window_out::call(window_length, out);
20122}
20123// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
20124inline at::Tensor & bartlett_window_outf(int64_t window_length, at::Tensor & out) {
20125 return at::_ops::bartlett_window_out::call(window_length, out);
20126}
20127
20128// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
20129inline at::Tensor & bartlett_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
20130 return at::_ops::bartlett_window_periodic_out::call(window_length, periodic, out);
20131}
20132// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
20133inline at::Tensor & bartlett_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
20134 return at::_ops::bartlett_window_periodic_out::call(window_length, periodic, out);
20135}
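//
// Editorial example (not part of the generated file): the window `.out`
// variants take no TensorOptions, so dtype and device come from the provided
// `out` tensor; a minimal sketch with an illustrative length:
//
//   at::Tensor out = at::empty({0});
//   at::bartlett_window_out(out, /*window_length=*/16, /*periodic=*/true);
//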
20136
20137// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
20138inline at::Tensor & quantized_batch_norm_out(at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
20139 return at::_ops::quantized_batch_norm_out::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
20140}
20141// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
20142inline at::Tensor & quantized_batch_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) {
20143 return at::_ops::quantized_batch_norm_out::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point, out);
20144}
20145
20146// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
20147inline at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt) {
20148 return at::_ops::bernoulli_Tensor_out::call(self, p, generator, out);
20149}
20150// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
20151inline at::Tensor & bernoulli_outf(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator, at::Tensor & out) {
20152 return at::_ops::bernoulli_Tensor_out::call(self, p, generator, out);
20153}
20154
20155// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor
20156inline at::Tensor bernoulli(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt) {
20157 return at::_ops::bernoulli_Tensor::call(self, p, generator);
20158}
20159
20160// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
20161inline at::Tensor & bernoulli_out(at::Tensor & out, const at::Tensor & self, double p=0.5, c10::optional<at::Generator> generator=c10::nullopt) {
20162 return at::_ops::bernoulli_float_out::call(self, p, generator, out);
20163}
20164// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
20165inline at::Tensor & bernoulli_outf(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {
20166 return at::_ops::bernoulli_float_out::call(self, p, generator, out);
20167}
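//
// Editorial example (not part of the generated file): `bernoulli.float_out`
// draws an independent Bernoulli(p) sample per element of `self` (its values
// are ignored, only its shape matters) into `out`, using the default
// generator when none is passed:
//
//   at::Tensor self = at::empty({5});
//   at::Tensor out  = at::empty({5});
//   at::bernoulli_out(out, self, /*p=*/0.25);
//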
20168
20169// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
20170inline at::Tensor & binary_cross_entropy_with_logits_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) {
20171 return at::_ops::binary_cross_entropy_with_logits_out::call(self, target, weight, pos_weight, reduction, out);
20172}
20173// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
20174inline at::Tensor & binary_cross_entropy_with_logits_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
20175 return at::_ops::binary_cross_entropy_with_logits_out::call(self, target, weight, pos_weight, reduction, out);
20176}
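//
// Editorial example (not part of the generated file): with the default
// at::Reduction::Mean the result is a scalar tensor, and weight/pos_weight
// stay disabled when left as empty optionals. Illustrative values:
//
//   at::Tensor logits = at::randn({4});
//   at::Tensor target = at::rand({4});  // soft targets in [0, 1)
//   at::Tensor out    = at::empty({});
//   at::binary_cross_entropy_with_logits_out(out, logits, target);
//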
20177
20178// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
20179inline at::Tensor & bincount_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0) {
20180 return at::_ops::bincount_out::call(self, weights, minlength, out);
20181}
20182// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)
20183inline at::Tensor & bincount_outf(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) {
20184 return at::_ops::bincount_out::call(self, weights, minlength, out);
20185}
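//
// Editorial example (not part of the generated file): `bincount` counts
// non-negative integer values; with no weights and minlength=0 the result
// has length max(self) + 1. Illustrative input:
//
//   at::Tensor idx = at::tensor({0, 1, 1, 3}, at::kLong);
//   at::Tensor out = at::empty({0}, at::kLong);
//   at::bincount_out(out, idx);  // out becomes {1, 2, 0, 1}
//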
20186
20187// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
20188inline at::Tensor & blackman_window_out(at::Tensor & out, int64_t window_length) {
20189 return at::_ops::blackman_window_out::call(window_length, out);
20190}
20191// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
20192inline at::Tensor & blackman_window_outf(int64_t window_length, at::Tensor & out) {
20193 return at::_ops::blackman_window_out::call(window_length, out);
20194}
20195
20196// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
20197inline at::Tensor & blackman_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
20198 return at::_ops::blackman_window_periodic_out::call(window_length, periodic, out);
20199}
20200// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
20201inline at::Tensor & blackman_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
20202 return at::_ops::blackman_window_periodic_out::call(window_length, periodic, out);
20203}
20204
20205// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
20206inline at::Tensor & block_diag_out(at::Tensor & out, at::TensorList tensors) {
20207 return at::_ops::block_diag_out::call(tensors, out);
20208}
20209// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
20211 return at::_ops::block_diag_out::call(tensors, out);
20212}
20213
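// `Tensor[]` schema arguments arrive as at::TensorList (an ArrayRef<Tensor>),
// so any contiguous container of Tensors binds to them. A minimal sketch with
// illustrative names:
//
//   std::vector<at::Tensor> blocks = {at::ones({2, 2}), at::ones({3, 3})};
//   at::Tensor out = at::empty({0});
//   at::block_diag_out(out, blocks);   // out is resized to a 5x5 block-diagonal matrix
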
20214// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
20215inline at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
20216 return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
20217}
20218namespace symint {
20219 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20220 at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
20221 return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
20222 }
20223}
20224
20225// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
20226inline at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
20227 return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
20228}
20229namespace symint {
20230 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20231 at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
20232 return at::_ops::constant_pad_nd_out::call(self, c10::fromIntArrayRefSlow(pad), value, out);
20233 }
20234}
20235
20236// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
20237inline at::Tensor & constant_pad_nd_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
20238 return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
20239}
20240namespace symint {
20241 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20242 at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0) {
20243 return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
20244 }
20245}
20246
20247// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)
20248inline at::Tensor & constant_pad_nd_symint_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
20249 return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
20250}
20251namespace symint {
20252 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20253 at::Tensor & constant_pad_nd_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
20254 return at::_ops::constant_pad_nd_out::call(self, pad, value, out);
20255 }
20256}
20257
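// NOTE on the `at::symint` namespace: the `*_symint_*` wrappers take
// c10::SymIntArrayRef directly, while `at::symint::` re-exports both the
// int64_t and the SymInt form under a single name and lets the caller pick
// the instantiation with an explicit template argument. An illustrative
// sketch (`x` and `out` are assumed names):
//
//   at::Tensor x = at::ones({2, 2});
//   at::Tensor out = at::empty({0});
//   // Plain-int spelling:
//   at::constant_pad_nd_out(out, x, {1, 1, 1, 1});
//   // Equivalent, selecting the int64_t instantiation explicitly:
//   at::symint::constant_pad_nd_out<int64_t>(out, x, {1, 1, 1, 1});
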
20258// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
20259inline at::Tensor & convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
20260 return at::_ops::convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
20261}
20262namespace symint {
20263 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20264 at::Tensor & convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
20265 return at::_ops::convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
20266 }
20267}
20268
20269// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
20270inline at::Tensor & convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
20271 return at::_ops::convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
20272}
20273namespace symint {
20274 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20275 at::Tensor & convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
20276 return at::_ops::convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, out);
20277 }
20278}
20279
20280// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
20281inline at::Tensor & convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
20282 return at::_ops::convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
20283}
20284namespace symint {
20285 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20286 at::Tensor & convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
20287 return at::_ops::convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
20288 }
20289}
20290
20291// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
20292inline at::Tensor & convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) {
20293 return at::_ops::convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
20294}
20295namespace symint {
20296 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20297 at::Tensor & convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) {
20298 return at::_ops::convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
20299 }
20300}
20301
20302// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20303inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
20304 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
20305}
20306namespace symint {
20307 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20308 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
20309 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
20310 }
20311}
20312
20313// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20314inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
20315 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
20316}
20317namespace symint {
20318 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20319 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
20320 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*bias_sizes)) : c10::nullopt, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
20321 }
20322}
20323
20324// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20325inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
20326 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
20327}
20328namespace symint {
20329 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20330 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
20331 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
20332 }
20333}
20334
20335// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20336inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
20337 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
20338}
20339namespace symint {
20340 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20341 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
20342 return at::_ops::convolution_backward_out::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
20343 }
20344}
20345
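// Multi-output ops return a tuple of references to the tensors you pass in,
// and `output_mask` selects which of {grad_input, grad_weight, grad_bias}
// are actually computed. A sketch assuming `grad_out`, `input`, and `weight`
// are suitably shaped tensors from a 2-d convolution (illustrative names,
// not defined in this header):
//
//   at::Tensor gi = at::empty({0}), gw = at::empty({0}), gb = at::empty({0});
//   at::convolution_backward_out(gi, gw, gb, grad_out, input, weight,
//       /*bias_sizes=*/c10::nullopt, /*stride=*/{1, 1}, /*padding=*/{0, 0},
//       /*dilation=*/{1, 1}, /*transposed=*/false, /*output_padding=*/{0, 0},
//       /*groups=*/1, /*output_mask=*/{true, true, false});  // skip grad_bias
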
20346// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
20347inline at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
20348 return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
20349}
20350// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
20351inline at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) {
20352 return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out);
20353}
20354
20355// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20356inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
20357 return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
20358}
20359// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20360inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
20361 return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
20362}
20363
20364// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20365inline at::Tensor & _convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
20366 return at::_ops::_convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20367}
20368namespace symint {
20369 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20370 at::Tensor & _convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
20371 return at::_ops::_convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20372 }
20373}
20374
20375// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20376inline at::Tensor & _convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
20377 return at::_ops::_convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20378}
20379namespace symint {
20380 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20381 at::Tensor & _convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
20382 return at::_ops::_convolution_out::call(input, weight, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, transposed, c10::fromIntArrayRefSlow(output_padding), groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20383 }
20384}
20385
20386// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20387inline at::Tensor & _convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
20388 return at::_ops::_convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20389}
20390namespace symint {
20391 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20392 at::Tensor & _convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
20393 return at::_ops::_convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20394 }
20395}
20396
20397// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20398inline at::Tensor & _convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
20399 return at::_ops::_convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20400}
20401namespace symint {
20402 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20403 at::Tensor & _convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
20404 return at::_ops::_convolution_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out);
20405 }
20406}
20407
20408// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
20409inline at::Tensor & conv_tbc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) {
20410 return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out);
20411}
20412// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
20413inline at::Tensor & conv_tbc_outf(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
20414 return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out);
20415}
20416
20417// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
20418inline at::Tensor & copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
20419 return at::_ops::copy_out::call(self, src, non_blocking, out);
20420}
20421// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
20422inline at::Tensor & copy_outf(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
20423 return at::_ops::copy_out::call(self, src, non_blocking, out);
20424}
20425
20426// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
20427inline at::Tensor & _copy_from_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) {
20428 return at::_ops::_copy_from_out::call(self, dst, non_blocking, out);
20429}
20430// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
20431inline at::Tensor & _copy_from_outf(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
20432 return at::_ops::_copy_from_out::call(self, dst, non_blocking, out);
20433}
20434
20435// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
20436inline at::Tensor & _copy_from_and_resize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & dst) {
20437 return at::_ops::_copy_from_and_resize_out::call(self, dst, out);
20438}
20439// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
20440inline at::Tensor & _copy_from_and_resize_outf(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {
20441 return at::_ops::_copy_from_and_resize_out::call(self, dst, out);
20442}
20443
20444// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
20445inline at::Tensor & count_nonzero_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
20446 return at::_ops::count_nonzero_dim_IntList_out::call(self, dim, out);
20447}
20448// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
20449inline at::Tensor & count_nonzero_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
20450 return at::_ops::count_nonzero_dim_IntList_out::call(self, dim, out);
20451}
20452
20453// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
20454inline at::Tensor & count_nonzero_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt) {
20455 return at::_ops::count_nonzero_out::call(self, dim, out);
20456}
20457// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
20458inline at::Tensor & count_nonzero_outf(const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out) {
20459 return at::_ops::count_nonzero_out::call(self, dim, out);
20460}
20461
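// `int?` schema arguments map to c10::optional<int64_t>: pass a value to
// count along one dimension, or c10::nullopt (the default) to count over all
// elements. Illustrative sketch:
//
//   at::Tensor x = at::eye(3);
//   at::Tensor out = at::empty({0}, at::kLong);
//   at::count_nonzero_out(out, x);      // all dims -> 0-dim tensor holding 3
//   at::count_nonzero_outf(x, 0, out);  // per-column counts along dim 0
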
20462// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
20463inline at::Tensor & cudnn_affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
20464 return at::_ops::cudnn_affine_grid_generator_out::call(theta, N, C, H, W, out);
20465}
20466// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
20467inline at::Tensor & cudnn_affine_grid_generator_outf(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
20468 return at::_ops::cudnn_affine_grid_generator_out::call(theta, N, C, H, W, out);
20469}
20470
20471// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
20472inline at::Tensor & cudnn_affine_grid_generator_backward_out(at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
20473 return at::_ops::cudnn_affine_grid_generator_backward_out::call(grad, N, C, H, W, out);
20474}
20475// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
20476inline at::Tensor & cudnn_affine_grid_generator_backward_outf(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
20477 return at::_ops::cudnn_affine_grid_generator_backward_out::call(grad, N, C, H, W, out);
20478}
20479
20480// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
20481inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
20482 return at::_ops::cudnn_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
20483}
20484// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
20485inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
20486 return at::_ops::cudnn_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
20487}
20488
20489// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20490inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
20491 return at::_ops::cudnn_batch_norm_backward_out::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
20492}
20493// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
20494inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_outf(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
20495 return at::_ops::cudnn_batch_norm_backward_out::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
20496}
20497
20498// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20499inline at::Tensor & cudnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
20500 return at::_ops::cudnn_convolution_out::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
20501}
20502// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20503inline at::Tensor & cudnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
20504 return at::_ops::cudnn_convolution_out::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
20505}
20506
20507// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20508inline at::Tensor & cudnn_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
20509 return at::_ops::cudnn_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
20510}
20511// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
20512inline at::Tensor & cudnn_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
20513 return at::_ops::cudnn_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
20514}
20515
20516// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
20517inline at::Tensor & _mps_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
20518 return at::_ops::_mps_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, out);
20519}
20520// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
20521inline at::Tensor & _mps_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
20522 return at::_ops::_mps_convolution_transpose_out::call(self, weight, padding, output_padding, stride, dilation, groups, out);
20523}
20524
20525// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20526inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {
20527 return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
20528}
20529// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20530inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
20531 return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
20532}
20533
20534// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
20535inline at::Tensor & cudnn_convolution_relu_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
20536 return at::_ops::cudnn_convolution_relu_out::call(self, weight, bias, stride, padding, dilation, groups, out);
20537}
20538// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
20539inline at::Tensor & cudnn_convolution_relu_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
20540 return at::_ops::cudnn_convolution_relu_out::call(self, weight, bias, stride, padding, dilation, groups, out);
20541}
20542
20543// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
20544inline at::Tensor & cudnn_convolution_add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
20545 return at::_ops::cudnn_convolution_add_relu_out::call(self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
20546}
20547// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
20548inline at::Tensor & cudnn_convolution_add_relu_outf(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
20549 return at::_ops::cudnn_convolution_add_relu_out::call(self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
20550}
20551
20552// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
20553inline at::Tensor & cudnn_grid_sampler_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & grid) {
20554 return at::_ops::cudnn_grid_sampler_out::call(self, grid, out);
20555}
20556// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
20557inline at::Tensor & cudnn_grid_sampler_outf(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
20558 return at::_ops::cudnn_grid_sampler_out::call(self, grid, out);
20559}
20560
20561// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20562inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
20563 return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
20564}
20565// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20566inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_outf(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
20567 return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
20568}
20569
20570// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20571inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) {
20572 return at::_ops::_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
20573}
20574// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20575inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
20576 return at::_ops::_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
20577}
20578
20579// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20580inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false) {
20581 return at::_ops::_ctc_loss_Tensor_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
20582}
20583// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
20584inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
20585 return at::_ops::_ctc_loss_Tensor_out::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
20586}
20587
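// `_ctc_loss` has both a host-side and a device-side lengths variant; which
// one you get is resolved purely by argument type (at::IntArrayRef vs.
// at::Tensor), mirroring the `.out` / `.Tensor_out` schema split above. A
// sketch assuming `log_probs`, `targets`, `len_t`, `tgt_len_t`, `out0`, and
// `out1` are already defined (all illustrative names):
//
//   // Host-side lengths (at::IntArrayRef):
//   at::_ctc_loss_out(out0, out1, log_probs, targets, {50, 50}, {20, 18});
//   // Device-side lengths (at::Tensor), selecting the Tensor_out overload:
//   at::_ctc_loss_out(out0, out1, log_probs, targets, len_t, tgt_len_t);
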
20588// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
20589inline at::Tensor & _ctc_loss_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
20590 return at::_ops::_ctc_loss_backward_out::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
20591}
20592// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
20593inline at::Tensor & _ctc_loss_backward_outf(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
20594 return at::_ops::_ctc_loss_backward_out::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
20595}
20596
20597// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
20598inline at::Tensor & diag_embed_out(at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) {
20599 return at::_ops::diag_embed_out::call(self, offset, dim1, dim2, out);
20600}
20601// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
20602inline at::Tensor & diag_embed_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
20603 return at::_ops::diag_embed_out::call(self, offset, dim1, dim2, out);
20604}
20605
20606// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
20607inline at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
20608 return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
20609}
20610namespace symint {
20611 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20612 at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
20613 return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
20614 }
20615}
20616
20617// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
20618inline at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
20619 return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
20620}
20621namespace symint {
20622 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20623 at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
20624 return at::_ops::diagonal_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), offset, dim1, dim2, out);
20625 }
20626}
20627
20628// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
20629inline at::Tensor & diagonal_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
20630 return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
20631}
20632namespace symint {
20633 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20634 at::Tensor & diagonal_backward_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
20635 return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
20636 }
20637}
20638
20639// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
20640inline at::Tensor & diagonal_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
20641 return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
20642}
20643namespace symint {
20644 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20645 at::Tensor & diagonal_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
20646 return at::_ops::diagonal_backward_out::call(grad_output, input_sizes, offset, dim1, dim2, out);
20647 }
20648}
20649
20650// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
20651inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
20652 return at::_ops::div_Scalar_out::call(self, other, out);
20653}
20654// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
20655inline at::Tensor & div_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
20656 return at::_ops::div_Scalar_out::call(self, other, out);
20657}
20658
20659// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
20660inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
20661 return at::_ops::div_Scalar_mode_out::call(self, other, rounding_mode, out);
20662}
20663// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
20664inline at::Tensor & div_outf(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
20665 return at::_ops::div_Scalar_mode_out::call(self, other, rounding_mode, out);
20666}
20667
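// The Scalar and Scalar_mode overloads are distinguished purely by the extra
// `rounding_mode` argument, a c10::optional<c10::string_view> taking "trunc",
// "floor", or c10::nullopt for true division. Sketch with an illustrative `x`:
//
//   at::Tensor x = at::arange(10, at::kFloat);
//   at::Tensor out = at::empty({0});
//   at::div_out(out, x, 3);            // Scalar_out: true division
//   at::div_out(out, x, 3, "floor");   // Scalar_mode_out: floor division
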
20668// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
20669inline at::Tensor & embedding_out(at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
20670 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20671}
20672namespace symint {
20673 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20674 at::Tensor & embedding_out(at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
20675 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20676 }
20677}
20678
20679// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
20680inline at::Tensor & embedding_outf(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
20681 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20682}
20683namespace symint {
20684 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20685 at::Tensor & embedding_outf(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
20686 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20687 }
20688}
20689
20690// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
20691inline at::Tensor & embedding_symint_out(at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
20692 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20693}
20694namespace symint {
20695 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20696 at::Tensor & embedding_out(at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
20697 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20698 }
20699}
20700
20701// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
20702inline at::Tensor & embedding_symint_outf(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
20703 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20704}
20705namespace symint {
20706 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20707 at::Tensor & embedding_outf(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
20708 return at::_ops::embedding_out::call(weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
20709 }
20710}
20711
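// SymInt scalar arguments (here `padding_idx`) convert implicitly from plain
// integers, so the symint spelling can be called exactly like the int64_t
// one; the distinction only matters when a genuinely symbolic value is being
// threaded through (e.g. under symbolic-shape tracing). Illustrative sketch:
//
//   at::Tensor weight = at::randn({10, 4});
//   at::Tensor indices = at::arange(3);               // Long indices
//   at::Tensor out = at::empty({0});
//   at::embedding_out(out, weight, indices);          // int64_t path
//   at::embedding_symint_out(out, weight, indices, -1);  // -1 converts to c10::SymInt
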
20712// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
20713inline at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
20714 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20715}
20716namespace symint {
20717 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20718 at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
20719 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20720 }
20721}
20722
20723// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
20724inline at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
20725 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20726}
20727namespace symint {
20728 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20729 at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
20730 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20731 }
20732}
20733
20734// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
20735inline at::Tensor & embedding_dense_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
20736 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20737}
20738namespace symint {
20739 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20740 at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
20741 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20742 }
20743}
20744
20745// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
20746inline at::Tensor & embedding_dense_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
20747 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20748}
20749namespace symint {
20750 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20751 at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
20752 return at::_ops::embedding_dense_backward_out::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
20753 }
20754}
20755
20756// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
20757inline at::Tensor & embedding_renorm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
20758 return at::_ops::embedding_renorm_out::call(self, indices, max_norm, norm_type, out);
20759}
20760// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
20761inline at::Tensor & embedding_renorm_outf(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
20762 return at::_ops::embedding_renorm_out::call(self, indices, max_norm, norm_type, out);
20763}
20764
20765// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
20766inline at::Tensor embedding_renorm(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
20767 return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type);
20768}
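// NOTE: [Functional renorm]
// `embedding_renorm` (no trailing underscore) is the generated functional
// counterpart of the in-place `at::embedding_renorm_`: the rows of `self`
// selected by `indices` are renormed in a returned copy instead of being
// mutated. Sketch, reusing the hypothetical `weight`/`indices` above:
//
//   at::Tensor renormed = at::embedding_renorm(weight, indices,
//                                              /*max_norm=*/1.0,
//                                              /*norm_type=*/2.0);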
20769
20770// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
20771inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) {
20772 return at::_ops::_embedding_bag_forward_only_out::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
20773}
20774// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
20775inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_outf(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
20776 return at::_ops::_embedding_bag_forward_only_out::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
20777}
20778
20779// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
20780inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) {
20781 return at::_ops::_embedding_bag_out::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
20782}
20783// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
20784inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_outf(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
20785 return at::_ops::_embedding_bag_out::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
20786}
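// NOTE: [Multi-output out variants]
// Operators with several outputs bind one `Tensor &` per schema output and
// return them again as a tuple of references. Sketch with hypothetical
// tensors (reusing `weight`/`indices` from above; the kernel resizes the
// outputs as needed):
//
//   at::Tensor offsets = at::tensor({0, 2}, at::kLong);
//   at::Tensor o0 = at::empty({0}, weight.options());
//   at::Tensor o1 = at::empty({0}, at::kLong);
//   at::Tensor o2 = at::empty({0}, at::kLong);
//   at::Tensor o3 = at::empty({0}, at::kLong);
//   auto outs = at::_embedding_bag_out(o0, o1, o2, o3, weight, indices, offsets);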
20787
20788// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
20789inline at::Tensor & _embedding_bag_dense_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
20790 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20791}
20792namespace symint {
20793 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20794 at::Tensor & _embedding_bag_dense_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
20795 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20796 }
20797}
20798
20799// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
20800inline at::Tensor & _embedding_bag_dense_backward_outf(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
20801 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20802}
20803namespace symint {
20804 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20805 at::Tensor & _embedding_bag_dense_backward_outf(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
20806 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20807 }
20808}
20809
20810// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
20811inline at::Tensor & _embedding_bag_dense_backward_symint_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
20812 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20813}
20814namespace symint {
20815 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20816 at::Tensor & _embedding_bag_dense_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
20817 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20818 }
20819}
20820
20821// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
20822inline at::Tensor & _embedding_bag_dense_backward_symint_outf(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
20823 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20824}
20825namespace symint {
20826 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20827 at::Tensor & _embedding_bag_dense_backward_outf(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
20828 return at::_ops::_embedding_bag_dense_backward_out::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
20829 }
20830}
20831
20832// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
20833inline at::Tensor & _embedding_bag_per_sample_weights_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) {
20834 return at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
20835}
20836// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
20837inline at::Tensor & _embedding_bag_per_sample_weights_backward_outf(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
20838 return at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
20839}
20840
20841// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
20842inline at::Tensor & empty_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
20843 return at::_ops::empty_names_out::call(size, names, memory_format, out);
20844}
20845// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
20846inline at::Tensor & empty_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
20847 return at::_ops::empty_names_out::call(size, names, memory_format, out);
20848}
20849
20850// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
20851inline at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
20852 return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
20853}
20854namespace symint {
20855 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20856 at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
20857 return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
20858 }
20859}
20860
20861// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
20862inline at::Tensor & new_empty_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
20863 return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
20864}
20865namespace symint {
20866 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20867 at::Tensor & new_empty_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
20868 return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
20869 }
20870}
20871
20872// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
20873inline at::Tensor & new_empty_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
20874 return at::_ops::new_empty_out::call(self, size, out);
20875}
20876namespace symint {
20877 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20878 at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
20879 return at::_ops::new_empty_out::call(self, size, out);
20880 }
20881}
20882
20883// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
20884inline at::Tensor & new_empty_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
20885 return at::_ops::new_empty_out::call(self, size, out);
20886}
20887namespace symint {
20888 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20889 at::Tensor & new_empty_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
20890 return at::_ops::new_empty_out::call(self, size, out);
20891 }
20892}
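// NOTE: [Choosing a symint overload]
// Inside `at::symint::`, the plain and symbolic overloads share one name and
// are disambiguated by an explicit template argument, so templated callers can
// select the `int64_t` or `c10::SymInt` path without renaming. Sketch with
// hypothetical tensors:
//
//   at::Tensor t   = at::randn({2, 2});
//   at::Tensor out = at::empty({0});
//   at::symint::new_empty_out<int64_t>(out, t, {3, 4});
//   // with symbolic sizes in hand:
//   // at::symint::new_empty_out<c10::SymInt>(out, t, sym_sizes);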
20893
20894// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
20895inline at::Tensor & new_empty_strided_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
20896 return at::_ops::new_empty_strided_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
20897}
20898namespace symint {
20899 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20900 at::Tensor & new_empty_strided_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
20901 return at::_ops::new_empty_strided_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
20902 }
20903}
20904
20905// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
20906inline at::Tensor & new_empty_strided_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
20907 return at::_ops::new_empty_strided_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
20908}
20909namespace symint {
20910 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20911 at::Tensor & new_empty_strided_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
20912 return at::_ops::new_empty_strided_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
20913 }
20914}
20915
20916// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
20917inline at::Tensor & new_empty_strided_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
20918 return at::_ops::new_empty_strided_out::call(self, size, stride, out);
20919}
20920namespace symint {
20921 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20922 at::Tensor & new_empty_strided_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
20923 return at::_ops::new_empty_strided_out::call(self, size, stride, out);
20924 }
20925}
20926
20927// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
20928inline at::Tensor & new_empty_strided_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
20929 return at::_ops::new_empty_strided_out::call(self, size, stride, out);
20930}
20931namespace symint {
20932 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20933 at::Tensor & new_empty_strided_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
20934 return at::_ops::new_empty_strided_out::call(self, size, stride, out);
20935 }
20936}
20937
20938// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
20939inline at::Tensor & new_full_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value) {
20940 return at::_ops::new_full_out::call(self, c10::fromIntArrayRefSlow(size), fill_value, out);
20941}
20942namespace symint {
20943 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20944 at::Tensor & new_full_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value) {
20945 return at::_ops::new_full_out::call(self, c10::fromIntArrayRefSlow(size), fill_value, out);
20946 }
20947}
20948
20949// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
20950inline at::Tensor & new_full_outf(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
20951 return at::_ops::new_full_out::call(self, c10::fromIntArrayRefSlow(size), fill_value, out);
20952}
20953namespace symint {
20954 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20955 at::Tensor & new_full_outf(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
20956 return at::_ops::new_full_out::call(self, c10::fromIntArrayRefSlow(size), fill_value, out);
20957 }
20958}
20959
20960// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
20961inline at::Tensor & new_full_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
20962 return at::_ops::new_full_out::call(self, size, fill_value, out);
20963}
20964namespace symint {
20965 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20966 at::Tensor & new_full_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
20967 return at::_ops::new_full_out::call(self, size, fill_value, out);
20968 }
20969}
20970
20971// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
20972inline at::Tensor & new_full_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
20973 return at::_ops::new_full_out::call(self, size, fill_value, out);
20974}
20975namespace symint {
20976 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
20977 at::Tensor & new_full_outf(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
20978 return at::_ops::new_full_out::call(self, size, fill_value, out);
20979 }
20980}
20981
20982// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
20983inline at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
20984 return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
20985}
20986namespace symint {
20987 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20988 at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
20989 return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
20990 }
20991}
20992
20993// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
20994inline at::Tensor & new_zeros_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
20995 return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
20996}
20997namespace symint {
20998 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
20999 at::Tensor & new_zeros_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
21000 return at::_ops::new_zeros_out::call(self, c10::fromIntArrayRefSlow(size), out);
21001 }
21002}
21003
21004// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
21005inline at::Tensor & new_zeros_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
21006 return at::_ops::new_zeros_out::call(self, size, out);
21007}
21008namespace symint {
21009 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21010 at::Tensor & new_zeros_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
21011 return at::_ops::new_zeros_out::call(self, size, out);
21012 }
21013}
21014
21015// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
21016inline at::Tensor & new_zeros_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
21017 return at::_ops::new_zeros_out::call(self, size, out);
21018}
21019namespace symint {
21020 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21021 at::Tensor & new_zeros_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
21022 return at::_ops::new_zeros_out::call(self, size, out);
21023 }
21024}
21025
21026// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
21027inline at::Tensor & new_ones_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
21028 return at::_ops::new_ones_out::call(self, c10::fromIntArrayRefSlow(size), out);
21029}
21030namespace symint {
21031 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21032 at::Tensor & new_ones_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
21033 return at::_ops::new_ones_out::call(self, c10::fromIntArrayRefSlow(size), out);
21034 }
21035}
21036
21037// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
21038inline at::Tensor & new_ones_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
21039 return at::_ops::new_ones_out::call(self, c10::fromIntArrayRefSlow(size), out);
21040}
21041namespace symint {
21042 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21043 at::Tensor & new_ones_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
21044 return at::_ops::new_ones_out::call(self, c10::fromIntArrayRefSlow(size), out);
21045 }
21046}
21047
21048// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
21049inline at::Tensor & new_ones_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
21050 return at::_ops::new_ones_out::call(self, size, out);
21051}
21052namespace symint {
21053 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21054 at::Tensor & new_ones_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
21055 return at::_ops::new_ones_out::call(self, size, out);
21056 }
21057}
21058
21059// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
21060inline at::Tensor & new_ones_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
21061 return at::_ops::new_ones_out::call(self, size, out);
21062}
21063namespace symint {
21064 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21065 at::Tensor & new_ones_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
21066 return at::_ops::new_ones_out::call(self, size, out);
21067 }
21068}
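// NOTE: [IntArrayRef to SymIntArrayRef conversion]
// The plain-integer overloads of these `new_*` factories wrap their sizes in
// `c10::fromIntArrayRefSlow(...)`, eagerly converting `IntArrayRef` to
// `SymIntArrayRef` before dispatch; the `_symint` spellings forward symbolic
// sizes untouched. With concrete shapes the plain form suffices. Sketch:
//
//   at::Tensor base = at::randn({2});
//   at::Tensor ones = at::empty({0});
//   at::new_ones_out(ones, base, {4, 4});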
21069
21070// aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
21071inline at::Tensor & _empty_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
21072 return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
21073}
21074// aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
21075inline at::Tensor & _empty_affine_quantized_outf(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
21076 return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
21077}
21078
21079// aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
21080inline at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
21081 return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
21082}
21083// aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
21084inline at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
21085 return at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales, zero_points, axis, memory_format, out);
21086}
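// NOTE: [Quantized empty defaults]
// Unlike the other `empty` variants in this file, the quantized ones above
// default `memory_format` to `MemoryFormat::Contiguous`, mirroring the
// schema's `memory_format=contiguous_format`. Sketch (assumes `qout` is an
// existing quantized tensor to write into):
//
//   at::_empty_affine_quantized_out(qout, {2, 2}, /*scale=*/0.1,
//                                   /*zero_point=*/0);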
21087
21088// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21089inline const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21090 return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
21091}
21092namespace symint {
21093 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21094 const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21095 return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
21096 }
21097}
21098
21099// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21100inline const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
21101 return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
21102}
21103namespace symint {
21104 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21105 const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
21106 return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out);
21107 }
21108}
21109
21110// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21111inline const at::Tensor & resize_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21112 return at::_ops::resize_out::call(self, size, memory_format, out);
21113}
21114namespace symint {
21115 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21116 const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21117 return at::_ops::resize_out::call(self, size, memory_format, out);
21118 }
21119}
21120
21121// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21122inline const at::Tensor & resize_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
21123 return at::_ops::resize_out::call(self, size, memory_format, out);
21124}
21125namespace symint {
21126 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21127 const at::Tensor & resize_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
21128 return at::_ops::resize_out::call(self, size, memory_format, out);
21129 }
21130}
21131
21132// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
21133inline at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21134 return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format);
21135}
21136namespace symint {
21137 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21138 at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21139 return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format);
21140 }
21141}
21142
21143// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
21144inline at::Tensor resize_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21145 return at::_ops::resize::call(self, size, memory_format);
21146}
21147namespace symint {
21148 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21149 at::Tensor resize(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21150 return at::_ops::resize::call(self, size, memory_format);
21151 }
21152}
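// NOTE: [const Tensor & out for resize]
// `resize_out` declares `out` as `const at::Tensor &`; const-ness here applies
// to the Tensor handle, not the storage it refers to, so the kernel may still
// resize and write through it. Sketch with hypothetical tensors:
//
//   at::Tensor t   = at::randn({2, 2});
//   at::Tensor out = at::empty({0});
//   at::resize_out(out, t, {4, 4});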
21153
21154// aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
21155inline const at::Tensor & _resize_output_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device) {
21156 return at::_ops::_resize_output_out::call(self, size, device, out);
21157}
21158// aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
21159inline const at::Tensor & _resize_output_outf(const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {
21160 return at::_ops::_resize_output_out::call(self, size, device, out);
21161}
21162
21163// aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor
21164inline at::Tensor _resize_output(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
21165 return at::_ops::_resize_output::call(self, size, device);
21166}
21167
21168// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21169inline at::Tensor & empty_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21170 return at::_ops::empty_quantized_out::call(size, qtensor, memory_format, out);
21171}
21172// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21173inline at::Tensor & empty_quantized_outf(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
21174 return at::_ops::empty_quantized_out::call(size, qtensor, memory_format, out);
21175}
21176
21177// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21178inline at::Tensor & empty_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21179 return at::_ops::empty_like_out::call(self, memory_format, out);
21180}
21181// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21182inline at::Tensor & empty_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
21183 return at::_ops::empty_like_out::call(self, memory_format, out);
21184}
21185
21186// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
21187inline at::Tensor & empty_strided_out(at::Tensor & out, at::IntArrayRef size, at::IntArrayRef stride) {
21188 return at::_ops::empty_strided_out::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
21189}
21190namespace symint {
21191 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21192 at::Tensor & empty_strided_out(at::Tensor & out, at::IntArrayRef size, at::IntArrayRef stride) {
21193 return at::_ops::empty_strided_out::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
21194 }
21195}
21196
21197// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
21198inline at::Tensor & empty_strided_outf(at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
21199 return at::_ops::empty_strided_out::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
21200}
21201namespace symint {
21202 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21203 at::Tensor & empty_strided_outf(at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
21204 return at::_ops::empty_strided_out::call(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
21205 }
21206}
21207
21208// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
21209inline at::Tensor & empty_strided_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
21210 return at::_ops::empty_strided_out::call(size, stride, out);
21211}
21212namespace symint {
21213 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21214 at::Tensor & empty_strided_out(at::Tensor & out, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
21215 return at::_ops::empty_strided_out::call(size, stride, out);
21216 }
21217}
21218
21219// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
21220inline at::Tensor & empty_strided_symint_outf(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
21221 return at::_ops::empty_strided_out::call(size, stride, out);
21222}
21223namespace symint {
21224 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21225 at::Tensor & empty_strided_outf(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
21226 return at::_ops::empty_strided_out::call(size, stride, out);
21227 }
21228}
21229
21230// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
21231inline at::Tensor & fill_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & value) {
21232 return at::_ops::fill_Scalar_out::call(self, value, out);
21233}
21234// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
21235inline at::Tensor & fill_outf(const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
21236 return at::_ops::fill_Scalar_out::call(self, value, out);
21237}
21238
21239// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
21240inline at::Tensor & fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & value) {
21241 return at::_ops::fill_Tensor_out::call(self, value, out);
21242}
21243// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
21244inline at::Tensor & fill_outf(const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
21245 return at::_ops::fill_Tensor_out::call(self, value, out);
21246}
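// NOTE: [fill overload resolution]
// `fill_out` is overloaded on the value: an `at::Scalar` resolves to
// `fill_Scalar_out`, a (typically 0-dim) `at::Tensor` to `fill_Tensor_out`.
// Sketch with hypothetical tensors:
//
//   at::Tensor src = at::randn({3});
//   at::Tensor out = at::empty({0});
//   at::fill_out(out, src, 2.5);                    // Scalar overload
//   at::fill_out(out, src, at::scalar_tensor(2.5)); // Tensor overload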
21247
21248// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
21249inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names) {
21250 return at::_ops::full_names_out::call(size, fill_value, names, out);
21251}
21252// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
21253inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
21254 return at::_ops::full_names_out::call(size, fill_value, names, out);
21255}
21256
21257// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21258inline at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
21259 return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
21260}
21261// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
21262inline at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
21263 return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
21264}
21265
21266// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
21267inline at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0) {
21268 return at::_ops::from_file_out::call(filename, shared, size, out);
21269}
21270// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
21271inline at::Tensor & from_file_outf(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) {
21272 return at::_ops::from_file_out::call(filename, shared, size, out);
21273}
21274
21275// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
21276inline at::Tensor & grid_sampler_2d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
21277 return at::_ops::grid_sampler_2d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
21278}
21279// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
21280inline at::Tensor & grid_sampler_2d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
21281 return at::_ops::grid_sampler_2d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
21282}
21283
21284// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21285inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
21286 return at::_ops::grid_sampler_2d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
21287}
21288// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21289inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
21290 return at::_ops::grid_sampler_2d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
21291}
21292
21293// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
21294inline at::Tensor & _grid_sampler_2d_cpu_fallback_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
21295 return at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
21296}
21297// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
21298inline at::Tensor & _grid_sampler_2d_cpu_fallback_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
21299 return at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
21300}
21301
21302// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
21303inline at::Tensor & grid_sampler_3d_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
21304 return at::_ops::grid_sampler_3d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
21305}
21306// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
21307inline at::Tensor & grid_sampler_3d_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
21308 return at::_ops::grid_sampler_3d_out::call(input, grid, interpolation_mode, padding_mode, align_corners, out);
21309}
21310
21311// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21312inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
21313 return at::_ops::grid_sampler_3d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
21314}
21315// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21316inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
21317 return at::_ops::grid_sampler_3d_backward_out::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
21318}
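// NOTE: [Grid sampler enum arguments]
// `interpolation_mode` and `padding_mode` are plain int64_t here; the accepted
// values mirror at::native::GridSamplerInterpolation /
// GridSamplerPadding (0 = bilinear / zeros, 1 = nearest / border,
// 2 = bicubic / reflection). Sketch with hypothetical tensors:
//
//   at::Tensor input = at::randn({1, 1, 4, 4});
//   at::Tensor grid  = at::randn({1, 2, 2, 2});
//   at::Tensor out   = at::empty({0});
//   at::grid_sampler_2d_out(out, input, grid, /*interpolation_mode=*/0,
//                           /*padding_mode=*/0, /*align_corners=*/false);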
21319
21320// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
21321inline at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length) {
21322 return at::_ops::hann_window_out::call(window_length, out);
21323}
21324// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
21325inline at::Tensor & hann_window_outf(int64_t window_length, at::Tensor & out) {
21326 return at::_ops::hann_window_out::call(window_length, out);
21327}
21328
21329// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
21330inline at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
21331 return at::_ops::hann_window_periodic_out::call(window_length, periodic, out);
21332}
21333// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
21334inline at::Tensor & hann_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
21335 return at::_ops::hann_window_periodic_out::call(window_length, periodic, out);
21336}
21337
21338// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
21339inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length) {
21340 return at::_ops::hamming_window_out::call(window_length, out);
21341}
21342// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
21343inline at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out) {
21344 return at::_ops::hamming_window_out::call(window_length, out);
21345}
21346
21347// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
21348inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
21349 return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out);
21350}
21351// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
21352inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
21353 return at::_ops::hamming_window_periodic_out::call(window_length, periodic, out);
21354}
21355
21356// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
21357inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha) {
21358 return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out);
21359}
21360// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
21361inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
21362 return at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out);
21363}
21364
21365// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
21366inline at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) {
21367 return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out);
21368}
21369// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
21370inline at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
21371 return at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out);
21372}
21373
21374// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
21375inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) {
21376 return at::_ops::kaiser_window_out::call(window_length, out);
21377}
21378// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
21379inline at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) {
21380 return at::_ops::kaiser_window_out::call(window_length, out);
21381}
21382
21383// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
21384inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
21385 return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out);
21386}
21387// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
21388inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
21389 return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out);
21390}
21391
21392// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
21393inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) {
21394 return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out);
21395}
21396// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
21397inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) {
21398 return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out);
21399}
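// NOTE: [Window function overload sets]
// The window-function `out` variants stack one overload per schema (plain,
// `periodic`, and the alpha/beta parameterisations), resolved by arity.
// Sketch:
//
//   at::Tensor w = at::empty({0});
//   at::hann_window_out(w, /*window_length=*/128);
//   at::hamming_window_out(w, 128, /*periodic=*/true, /*alpha=*/0.54,
//                          /*beta=*/0.46);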
21400
21401// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21402inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
21403 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21404}
21405namespace symint {
21406 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21407 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
21408 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21409 }
21410}
21411
21412// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21413inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21414 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21415}
21416namespace symint {
21417 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21418 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21419 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21420 }
21421}
21422
21423// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21424inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
21425 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21426}
21427namespace symint {
21428 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21429 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
21430 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21431 }
21432}
21433
21434// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21435inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_symint_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21436 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21437}
21438namespace symint {
21439 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21440 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_outf(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21441 return at::_ops::native_group_norm_out::call(input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
21442 }
21443}
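
// The `at::symint` templates above let size-generic code select an overload
// via an explicit template argument. A hedged sketch (argument names are
// assumptions):
//
//   // Concrete sizes: resolves to the int64_t overload.
//   at::symint::native_group_norm_out<int64_t>(
//       out0, out1, out2, input, weight, bias, N, C, HxW, group, eps);
//   // Symbolic sizes (e.g. while tracing): resolves to the c10::SymInt overload.
//   at::symint::native_group_norm_out<c10::SymInt>(
//       out0, out1, out2, input, weight, bias, sym_N, sym_C, sym_HxW, group, eps);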
21444
21445// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21446inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
21447 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21448}
21449namespace symint {
21450 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21451 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
21452 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21453 }
21454}
21455
21456// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21457inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21458 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21459}
21460namespace symint {
21461 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21462 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21463 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21464 }
21465}
21466
21467// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21468inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
21469 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21470}
21471namespace symint {
21472 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21473 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
21474 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21475 }
21476}
21477
21478// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21479inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21480 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21481}
21482namespace symint {
21483 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21484 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21485 return at::_ops::native_group_norm_backward_out::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
21486 }
21487}
21488
21489// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
21490inline at::Tensor & index_put_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
21491 return at::_ops::index_put_out::call(self, indices, values, accumulate, out);
21492}
21493// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
21494inline at::Tensor & index_put_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
21495 return at::_ops::index_put_out::call(self, indices, values, accumulate, out);
21496}
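
// A hedged sketch of building the optional-tensor index list for index_put
// (variable names are assumptions, not part of this header):
//
//   c10::List<c10::optional<at::Tensor>> indices;
//   indices.push_back(at::tensor({0, 2}));   // select rows 0 and 2 of dim 0
//   indices.push_back(c10::nullopt);         // leave dim 1 untouched
//   at::Tensor out = at::empty_like(self);
//   at::index_put_out(out, self, indices, values, /*accumulate=*/false);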
21497
21498// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
21499inline at::Tensor & _index_put_impl_out(at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
21500 return at::_ops::_index_put_impl_out::call(self, indices, values, accumulate, unsafe, out);
21501}
21502// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
21503inline at::Tensor & _index_put_impl_outf(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
21504 return at::_ops::_index_put_impl_out::call(self, indices, values, accumulate, unsafe, out);
21505}
21506
21507// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
21508inline at::Tensor _index_put_impl(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
21509 return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
21510}
21511
21512// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
21513inline at::Tensor & isnan_out(at::Tensor & out, const at::Tensor & self) {
21514 return at::_ops::isnan_out::call(self, out);
21515}
21516// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
21517inline at::Tensor & isnan_outf(const at::Tensor & self, at::Tensor & out) {
21518 return at::_ops::isnan_out::call(self, out);
21519}
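
// Illustrative sketch: `isnan` produces boolean results, so the out tensor is
// typically allocated with a bool dtype (values below are assumptions; NAN is
// from <cmath>):
//
//   at::Tensor x = at::tensor({1.0f, NAN, 3.0f});
//   at::Tensor out = at::empty({3}, at::kBool);
//   at::isnan_out(out, x);   // out == [false, true, false]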
21520
21521// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21522inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
21523 return at::_ops::native_layer_norm_out::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2);
21524}
21525namespace symint {
21526 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21527 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
21528 return at::_ops::native_layer_norm_out::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2);
21529 }
21530}
21531
21532// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21533inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_outf(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21534 return at::_ops::native_layer_norm_out::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2);
21535}
21536namespace symint {
21537 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21538 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_outf(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21539 return at::_ops::native_layer_norm_out::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, out0, out1, out2);
21540 }
21541}
21542
21543// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21544inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
21545 return at::_ops::native_layer_norm_out::call(input, normalized_shape, weight, bias, eps, out0, out1, out2);
21546}
21547namespace symint {
21548 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21549 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
21550 return at::_ops::native_layer_norm_out::call(input, normalized_shape, weight, bias, eps, out0, out1, out2);
21551 }
21552}
21553
21554// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21555inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_symint_outf(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21556 return at::_ops::native_layer_norm_out::call(input, normalized_shape, weight, bias, eps, out0, out1, out2);
21557}
21558namespace symint {
21559 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21560 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_outf(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21561 return at::_ops::native_layer_norm_out::call(input, normalized_shape, weight, bias, eps, out0, out1, out2);
21562 }
21563}
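
// A hedged sketch of the three-output layer-norm form (shapes are
// illustrative; like other out-variants, the kernel may resize the buffers):
//
//   at::Tensor input = at::randn({2, 3, 4});
//   at::Tensor out0 = at::empty({0}), out1 = at::empty({0}), out2 = at::empty({0});
//   at::native_layer_norm_out(out0, out1, out2, input,
//       /*normalized_shape=*/{4}, /*weight=*/{}, /*bias=*/{}, /*eps=*/1e-5);
//   // out0: normalized output; out1/out2: per-slice mean and rstd.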
21564
21565// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21566inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
21567 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
21568}
21569namespace symint {
21570 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21571 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
21572 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
21573 }
21574}
21575
21576// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21577inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21578 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
21579}
21580namespace symint {
21581 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21582 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21583 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, c10::fromIntArrayRefSlow(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
21584 }
21585}
21586
21587// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21588inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
21589 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
21590}
21591namespace symint {
21592 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21593 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
21594 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
21595 }
21596}
21597
21598// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21599inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21600 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
21601}
21602namespace symint {
21603 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21604 ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21605 return at::_ops::native_layer_norm_backward_out::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
21606 }
21607}
21608
21609// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21610inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
21611 return at::_ops::linear_backward_out::call(self, grad_output, weight, output_mask, out0, out1, out2);
21612}
21613// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21614inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21615 return at::_ops::linear_backward_out::call(self, grad_output, weight, output_mask, out0, out1, out2);
21616}
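
// Hedged note: `output_mask` selects which gradients are materialized, in
// output order {grad_input, grad_weight, grad_bias}; a false entry means the
// corresponding result can be skipped. Illustrative call (names assumed):
//
//   ::std::array<bool,3> mask{true, true, false};   // skip the bias gradient
//   at::linear_backward_out(out0, out1, out2, self, grad_output, weight, mask);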
21617
21618// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
21619inline at::Tensor & mkldnn_linear_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
21620 return at::_ops::mkldnn_linear_out::call(self, weight, bias, out);
21621}
21622// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
21623inline at::Tensor & mkldnn_linear_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
21624 return at::_ops::mkldnn_linear_out::call(self, weight, bias, out);
21625}
21626
21627// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
21628inline at::Tensor & mkldnn_linear_backward_input_out(at::Tensor & out, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
21629 return at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output, weight, out);
21630}
21631// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
21632inline at::Tensor & mkldnn_linear_backward_input_outf(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
21633 return at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output, weight, out);
21634}
21635
21636// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21637inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
21638 return at::_ops::mkldnn_linear_backward_weights_out::call(grad_output, input, weight, bias_defined, out0, out1);
21639}
21640// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21641inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
21642 return at::_ops::mkldnn_linear_backward_weights_out::call(grad_output, input, weight, bias_defined, out0, out1);
21643}
21644
21645// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21646inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
21647 return at::_ops::mkldnn_linear_backward_out::call(self, grad_output, weight, output_mask, out0, out1, out2);
21648}
21649// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21650inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21651 return at::_ops::mkldnn_linear_backward_out::call(self, grad_output, weight, output_mask, out0, out1, out2);
21652}
21653
21654// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21655inline ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
21656 return at::_ops::matmul_backward_out::call(grad, self, other, mask, out0, out1);
21657}
21658// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21659inline ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_outf(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
21660 return at::_ops::matmul_backward_out::call(grad, self, other, mask, out0, out1);
21661}
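
// The same masking pattern with two outputs, {grad_self, grad_other}
// (a hedged sketch; names are assumptions):
//
//   ::std::array<bool,2> mask{true, false};   // only the gradient w.r.t. self
//   at::matmul_backward_out(out0, out1, grad, self, other, mask);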
21662
21663// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21664inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self) {
21665 return at::_ops::_aminmax_out::call(self, out0, out1);
21666}
21667// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21668inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
21669 return at::_ops::_aminmax_out::call(self, out0, out1);
21670}
21671
21672// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21673inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, int64_t dim, bool keepdim=false) {
21674 return at::_ops::_aminmax_dim_out::call(self, dim, keepdim, out0, out1);
21675}
21676// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
21677inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
21678 return at::_ops::_aminmax_dim_out::call(self, dim, keepdim, out0, out1);
21679}
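
// Hedged sketch of the two `_aminmax` out-variants; the public `at::aminmax`
// overloads are generally preferred over these internal ones (names assumed):
//
//   at::Tensor mn = at::empty({0}), mx = at::empty({0});
//   at::_aminmax_out(mn, mx, self);                                // whole tensor
//   at::_aminmax_out(mn, mx, self, /*dim=*/1, /*keepdim=*/false);  // along a dim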
21680
21681// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21682inline at::Tensor & max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
21683 return at::_ops::max_pool2d_backward_out::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
21684}
21685// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21686inline at::Tensor & max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
21687 return at::_ops::max_pool2d_backward_out::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
21688}
21689
21690// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21691inline at::Tensor & mkldnn_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
21692 return at::_ops::mkldnn_max_pool2d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21693}
21694// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21695inline at::Tensor & mkldnn_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
21696 return at::_ops::mkldnn_max_pool2d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21697}
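
// Hedged note on the pooling defaults above: an empty `stride` ([]) is
// conventionally interpreted as stride == kernel_size, and the scalar
// defaults broadcast over both spatial dims. Illustrative call (assumed names):
//
//   at::Tensor out = at::empty({0}, self.options());
//   at::mkldnn_max_pool2d_out(out, self, /*kernel_size=*/{2, 2});
//   // i.e. kernel_size={2,2}, stride={2,2}, padding=0, dilation=1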
21698
21699// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21700inline at::Tensor & mkldnn_max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
21701 return at::_ops::mkldnn_max_pool2d_backward_out::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
21702}
21703// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21704inline at::Tensor & mkldnn_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
21705 return at::_ops::mkldnn_max_pool2d_backward_out::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
21706}
21707
21708// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21709inline at::Tensor & mkldnn_max_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
21710 return at::_ops::mkldnn_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21711}
21712// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21713inline at::Tensor & mkldnn_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
21714 return at::_ops::mkldnn_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21715}
21716
21717// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21718inline at::Tensor & mkldnn_max_pool3d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
21719 return at::_ops::mkldnn_max_pool3d_backward_out::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
21720}
21721// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21722inline at::Tensor & mkldnn_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
21723 return at::_ops::mkldnn_max_pool3d_backward_out::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
21724}
21725
21726// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21727inline at::Tensor & quantized_max_pool1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
21728 return at::_ops::quantized_max_pool1d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21729}
21730// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21731inline at::Tensor & quantized_max_pool1d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
21732 return at::_ops::quantized_max_pool1d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21733}
21734
21735// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21736inline at::Tensor & quantized_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
21737 return at::_ops::quantized_max_pool2d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21738}
21739// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
21740inline at::Tensor & quantized_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
21741 return at::_ops::quantized_max_pool2d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out);
21742}
21743
21744// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
21745inline at::Tensor & median_out(at::Tensor & out, const at::Tensor & self) {
21746 return at::_ops::median_out::call(self, out);
21747}
21748// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
21749inline at::Tensor & median_outf(const at::Tensor & self, at::Tensor & out) {
21750 return at::_ops::median_out::call(self, out);
21751}
21752
21753// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
21754inline at::Tensor & nanmedian_out(at::Tensor & out, const at::Tensor & self) {
21755 return at::_ops::nanmedian_out::call(self, out);
21756}
21757// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
21758inline at::Tensor & nanmedian_outf(const at::Tensor & self, at::Tensor & out) {
21759 return at::_ops::nanmedian_out::call(self, out);
21760}
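
// Illustrative sketch contrasting the two reductions (values are assumptions;
// NAN is from <cmath>):
//
//   at::Tensor x = at::tensor({1.0f, NAN, 3.0f, 5.0f});
//   at::Tensor m = at::empty({0});
//   at::median_out(m, x);      // a NaN in the input makes the result NaN
//   at::nanmedian_out(m, x);   // NaNs ignored: median of {1,3,5} is 3.0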
21761
21762// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
21763inline at::Tensor & _mps_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
21764 return at::_ops::_mps_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
21765}
21766// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
21767inline at::Tensor & _mps_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
21768 return at::_ops::_mps_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
21769}
21770
21771// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21772inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask) {
21773 return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
21774}
21775// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21776inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21777 return at::_ops::mps_convolution_backward_out::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
21778}
21779
21780// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
21781inline at::Tensor & mkldnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
21782 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, out);
21783}
21784namespace symint {
21785 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21786 at::Tensor & mkldnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
21787 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, out);
21788 }
21789}
21790
21791// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
21792inline at::Tensor & mkldnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
21793 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, out);
21794}
21795namespace symint {
21796 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21797 at::Tensor & mkldnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
21798 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, out);
21799 }
21800}
21801
21802// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
21803inline at::Tensor & mkldnn_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
21804 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
21805}
21806namespace symint {
21807 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21808 at::Tensor & mkldnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
21809 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
21810 }
21811}
21812
21813// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
21814inline at::Tensor & mkldnn_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
21815 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
21816}
21817namespace symint {
21818 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21819 at::Tensor & mkldnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
21820 return at::_ops::mkldnn_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
21821 }
21822}
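
// Hedged note: the IntArrayRef overloads above widen `padding` through
// c10::fromIntArrayRefSlow before dispatching, while the `_symint_` forms
// pass a c10::SymIntArrayRef straight through. Illustrative calls (assumed
// names):
//
//   at::mkldnn_convolution_out(out, self, weight, bias,
//       /*padding=*/{1, 1}, /*stride=*/{1, 1}, /*dilation=*/{1, 1}, /*groups=*/1);
//   at::mkldnn_convolution_symint_out(out, self, weight, bias,
//       sym_padding, /*stride=*/{1, 1}, /*dilation=*/{1, 1}, /*groups=*/1);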
21823
21824// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
21825inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
21826 return at::_ops::mkldnn_rnn_layer_out::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
21827}
21828// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
21829inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_outf(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
21830 return at::_ops::mkldnn_rnn_layer_out::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3);
21831}
21832
21833// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
21834inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
21835 return at::_ops::mkldnn_rnn_layer_backward_out::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
21836}
21837// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))
21838inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_outf(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
21839 return at::_ops::mkldnn_rnn_layer_backward_out::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace, out0, out1, out2, out3, out4, out5, out6);
21840}
21841
21842// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21843inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
21844 return at::_ops::miopen_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
21845}
21846// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21847inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21848 return at::_ops::miopen_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
21849}
21850
21851// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21852inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
21853 return at::_ops::miopen_batch_norm_backward_out::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
21854}
21855// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
21856inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_outf(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
21857 return at::_ops::miopen_batch_norm_backward_out::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
21858}
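
// Hedged note: the miopen_* operators target the ROCm/MIOpen backend and are
// typically not dispatchable in other builds. In the forward out-variant
// above, out0 is the normalized output while out1/out2 hold the saved mean
// and variance that miopen_batch_norm_backward consumes as save_mean/save_var.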
21859
21860// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21861inline at::Tensor & miopen_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21862 return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21863}
21864namespace symint {
21865 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21866 at::Tensor & miopen_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21867 return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21868 }
21869}
21870
21871// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21872inline at::Tensor & miopen_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21873 return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21874}
21875namespace symint {
21876 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21877 at::Tensor & miopen_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21878 return at::_ops::miopen_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21879 }
21880}
21881
21882// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21883inline at::Tensor & miopen_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21884 return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21885}
21886namespace symint {
21887 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21888 at::Tensor & miopen_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21889 return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21890 }
21891}
21892
21893// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21894inline at::Tensor & miopen_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21895 return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21896}
21897namespace symint {
21898 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21899 at::Tensor & miopen_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21900 return at::_ops::miopen_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21901 }
21902}
21903
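// NOTE: usage sketch (illustrative, not part of the generated bindings).
// The at::symint:: overloads are disambiguated by an explicit template
// argument rather than by name: <int64_t> selects the at::IntArrayRef
// overload (which widens via c10::fromIntArrayRefSlow), <c10::SymInt>
// selects the c10::SymIntArrayRef overload. For example:
//
//   at::symint::miopen_convolution_out<c10::SymInt>(
//       out, self, weight, bias, sym_padding, stride, dilation,
//       /*groups=*/1, /*benchmark=*/false, /*deterministic=*/false);
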
21904// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21905inline at::Tensor & miopen_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21906 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic, out);
21907}
21908namespace symint {
21909 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21910 at::Tensor & miopen_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21911 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic, out);
21912 }
21913}
21914
21915// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21916inline at::Tensor & miopen_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21917 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic, out);
21918}
21919namespace symint {
21920 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21921 at::Tensor & miopen_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21922 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), stride, dilation, groups, benchmark, deterministic, out);
21923 }
21924}
21925
21926// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21927inline at::Tensor & miopen_convolution_transpose_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21928 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
21929}
21930namespace symint {
21931 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21932 at::Tensor & miopen_convolution_transpose_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21933 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
21934 }
21935}
21936
21937// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21938inline at::Tensor & miopen_convolution_transpose_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21939 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
21940}
21941namespace symint {
21942 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21943 at::Tensor & miopen_convolution_transpose_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21944 return at::_ops::miopen_convolution_transpose_out::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
21945 }
21946}
21947
21948// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21949inline at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21950 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21951}
21952namespace symint {
21953 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21954 at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21955 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21956 }
21957}
21958
21959// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21960inline at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21961 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21962}
21963namespace symint {
21964 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
21965 at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21966 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out);
21967 }
21968}
21969
21970// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21971inline at::Tensor & miopen_depthwise_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21972 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21973}
21974namespace symint {
21975 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21976 at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
21977 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21978 }
21979}
21980
21981// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
21982inline at::Tensor & miopen_depthwise_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21983 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21984}
21985namespace symint {
21986 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
21987 at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
21988 return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
21989 }
21990}
21991
21992// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
21993inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
21994 return at::_ops::miopen_rnn_out::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
21995}
21996// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
21997inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
21998 return at::_ops::miopen_rnn_out::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
21999}
22000
22001// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
22002inline void miopen_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
22003 return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
22004}
22005// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
22006inline void miopen_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
22007 return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
22008}
22009
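// NOTE: usage sketch (illustrative, not part of the generated bindings).
// Unlike the tuple-returning variants above, miopen_rnn_backward_out
// returns void: grad_input/grad_hx/grad_cx are written into out0/out1/out2,
// and the per-weight gradients go into the preallocated tensors of `out3`.
// An at::TensorList is a non-owning view, so the storage must outlive the call:
//
//   std::vector<at::Tensor> weight_grads(weight.size());
//   for (auto & g : weight_grads) g = at::empty({0}, input.options());
//   // pass at::TensorList(weight_grads) as out3
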
22010// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
22011inline at::Tensor & _sparse_sparse_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
22012 return at::_ops::_sparse_sparse_matmul_out::call(self, other, out);
22013}
22014// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
22015inline at::Tensor & _sparse_sparse_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
22016 return at::_ops::_sparse_sparse_matmul_out::call(self, other, out);
22017}
22018
22019// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
22020inline at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
22021 return at::_ops::mul_Scalar_out::call(self, other, out);
22022}
22023// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
22024inline at::Tensor & mul_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
22025 return at::_ops::mul_Scalar_out::call(self, other, out);
22026}
22027
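// NOTE: usage sketch (illustrative, not part of the generated bindings).
// This mul_out overload covers the Scalar schema; the Tensor-Tensor
// overload is declared elsewhere in this header, and ordinary overload
// resolution picks between them:
//
//   at::Tensor out = at::empty_like(self);
//   at::mul_out(out, self, at::Scalar(2.0));  // dispatches to mul.Scalar_out
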
22028// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
22029inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
22030 return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
22031}
22032
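// NOTE: usage sketch (illustrative, not part of the generated bindings).
// The functional variant leaves `running_mean`/`running_var` untouched and
// instead returns the updated statistics as the last two tuple elements:
//
//   auto [y, save_mean, save_invstd, new_mean, new_var] =
//       at::_native_batch_norm_legit_functional(input, weight, bias,
//           running_mean, running_var, /*training=*/true,
//           /*momentum=*/0.1, /*eps=*/1e-5);
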
22033// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22034inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps) {
22035 return at::_ops::batch_norm_stats_out::call(input, eps, out0, out1);
22036}
22037// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22038inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_outf(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
22039 return at::_ops::batch_norm_stats_out::call(input, eps, out0, out1);
22040}
22041
22042// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22043inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
22044 return at::_ops::batch_norm_gather_stats_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
22045}
22046// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22047inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {
22048 return at::_ops::batch_norm_gather_stats_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
22049}
22050
22051// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22052inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
22053 return at::_ops::batch_norm_gather_stats_with_counts_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
22054}
22055// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22056inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
22057 return at::_ops::batch_norm_gather_stats_with_counts_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
22058}
22059
22060// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
22061inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
22062 return at::_ops::native_batch_norm_backward_out::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
22063}
22064// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
22065inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
22066 return at::_ops::native_batch_norm_backward_out::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
22067}
22068
22069// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
22070inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
22071 return at::_ops::batch_norm_backward_reduce_out::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
22072}
22073// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
22074inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
22075 return at::_ops::batch_norm_backward_reduce_out::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
22076}
22077
22078// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
22079inline at::Tensor & batch_norm_backward_elemt_out(at::Tensor & out, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
22080 return at::_ops::batch_norm_backward_elemt_out::call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
22081}
22082// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
22083inline at::Tensor & batch_norm_backward_elemt_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) {
22084 return at::_ops::batch_norm_backward_elemt_out::call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
22085}
22086
22087// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22088inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
22089 return at::_ops::batch_norm_update_stats_out::call(input, running_mean, running_var, momentum, out0, out1);
22090}
22091// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22092inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_outf(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
22093 return at::_ops::batch_norm_update_stats_out::call(input, running_mean, running_var, momentum, out0, out1);
22094}
22095
22096// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
22097inline at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
22098 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride, out);
22099}
22100namespace symint {
22101 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22102 at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
22103 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride, out);
22104 }
22105}
22106
22107// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
22108inline at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
22109 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride, out);
22110}
22111namespace symint {
22112 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22113 at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
22114 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), stride, out);
22115 }
22116}
22117
22118// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
22119inline at::Tensor & _nnpack_spatial_convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride=1) {
22120 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
22121}
22122namespace symint {
22123 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22124 at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride=1) {
22125 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
22126 }
22127}
22128
22129// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
22130inline at::Tensor & _nnpack_spatial_convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
22131 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
22132}
22133namespace symint {
22134 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22135 at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
22136 return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
22137 }
22138}
22139
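// NOTE: usage sketch (illustrative, not part of the generated bindings).
// The plain at::IntArrayRef wrapper widens concrete sizes with
// c10::fromIntArrayRefSlow before dispatch, so both calls below reach the
// same operator (`sym_padding` here is an assumed std::vector<c10::SymInt>):
//
//   at::_nnpack_spatial_convolution_out(out, input, weight, bias,
//                                       /*padding=*/{1, 1});
//   at::_nnpack_spatial_convolution_symint_out(out, input, weight, bias,
//                                              c10::SymIntArrayRef(sym_padding));
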
22140// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22141inline at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
22142 return at::_ops::ones_names_out::call(size, names, out);
22143}
22144// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22145inline at::Tensor & ones_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22146 return at::_ops::ones_names_out::call(size, names, out);
22147}
22148
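// NOTE: usage sketch (illustrative, not part of the generated bindings).
// The names_out overload attaches dimension names to the result:
//
//   at::Tensor out = at::empty({2, 3});
//   std::vector<at::Dimname> names = {
//       at::Dimname::fromSymbol(at::Symbol::dimname("N")),
//       at::Dimname::fromSymbol(at::Symbol::dimname("C"))};
//   at::ones_out(out, {2, 3}, at::DimnameList(names));
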
22149// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22150inline at::Tensor & ones_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
22151 return at::_ops::ones_like_out::call(self, memory_format, out);
22152}
22153// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22154inline at::Tensor & ones_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
22155 return at::_ops::ones_like_out::call(self, memory_format, out);
22156}
22157
22158// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
22159inline at::Tensor & _euclidean_dist_out(at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2) {
22160 return at::_ops::_euclidean_dist_out::call(x1, x2, out);
22161}
22162// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
22163inline at::Tensor & _euclidean_dist_outf(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
22164 return at::_ops::_euclidean_dist_out::call(x1, x2, out);
22165}
22166
22167// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
22168inline at::Tensor & _cdist_forward_out(at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
22169 return at::_ops::_cdist_forward_out::call(x1, x2, p, compute_mode, out);
22170}
22171// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
22172inline at::Tensor & _cdist_forward_outf(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) {
22173 return at::_ops::_cdist_forward_out::call(x1, x2, p, compute_mode, out);
22174}
22175
22176// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
22177inline at::Tensor & _cdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
22178 return at::_ops::_cdist_backward_out::call(grad, x1, x2, p, cdist, out);
22179}
22180// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
22181inline at::Tensor & _cdist_backward_outf(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
22182 return at::_ops::_cdist_backward_out::call(grad, x1, x2, p, cdist, out);
22183}
22184
22185// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
22186inline at::Tensor & _pdist_forward_out(at::Tensor & out, const at::Tensor & self, double p=2) {
22187 return at::_ops::_pdist_forward_out::call(self, p, out);
22188}
22189// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
22190inline at::Tensor & _pdist_forward_outf(const at::Tensor & self, double p, at::Tensor & out) {
22191 return at::_ops::_pdist_forward_out::call(self, p, out);
22192}
22193
22194// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
22195inline at::Tensor & _pdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
22196 return at::_ops::_pdist_backward_out::call(grad, self, p, pdist, out);
22197}
22198// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
22199inline at::Tensor & _pdist_backward_outf(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {
22200 return at::_ops::_pdist_backward_out::call(grad, self, p, pdist, out);
22201}
22202
22203// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
22204inline at::Tensor & pixel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t upscale_factor) {
22205 return at::_ops::pixel_shuffle_out::call(self, upscale_factor, out);
22206}
22207// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
22208inline at::Tensor & pixel_shuffle_outf(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
22209 return at::_ops::pixel_shuffle_out::call(self, upscale_factor, out);
22210}
22211
22212// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
22213inline at::Tensor & pixel_unshuffle_out(at::Tensor & out, const at::Tensor & self, int64_t downscale_factor) {
22214 return at::_ops::pixel_unshuffle_out::call(self, downscale_factor, out);
22215}
22216// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
22217inline at::Tensor & pixel_unshuffle_outf(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
22218 return at::_ops::pixel_unshuffle_out::call(self, downscale_factor, out);
22219}
22220
22221// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)
22222inline at::Tensor & channel_shuffle_out(at::Tensor & out, const at::Tensor & self, int64_t groups) {
22223 return at::_ops::channel_shuffle_out::call(self, groups, out);
22224}
22225// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)
22226inline at::Tensor & channel_shuffle_outf(const at::Tensor & self, int64_t groups, at::Tensor & out) {
22227 return at::_ops::channel_shuffle_out::call(self, groups, out);
22228}
22229
22230// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
22231inline at::Tensor & _pin_memory_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt) {
22232 return at::_ops::_pin_memory_out::call(self, device, out);
22233}
22234// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
22235inline at::Tensor & _pin_memory_outf(const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) {
22236 return at::_ops::_pin_memory_out::call(self, device, out);
22237}
22238
22239// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
22240inline at::Tensor & scalar_tensor_out(at::Tensor & out, const at::Scalar & s) {
22241 return at::_ops::scalar_tensor_out::call(s, out);
22242}
22243// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
22244inline at::Tensor & scalar_tensor_outf(const at::Scalar & s, at::Tensor & out) {
22245 return at::_ops::scalar_tensor_out::call(s, out);
22246}
22247
22248// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22249inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
22250 return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22251}
22252namespace symint {
22253 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22254 at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
22255 return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22256 }
22257}
22258
22259// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22260inline at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22261 return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22262}
22263namespace symint {
22264 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22265 at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22266 return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22267 }
22268}
22269
22270// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22271inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) {
22272 return at::_ops::rand_names_out::call(size, names, out);
22273}
22274namespace symint {
22275 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22276 at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) {
22277 return at::_ops::rand_names_out::call(size, names, out);
22278 }
22279}
22280
22281// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22282inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22283 return at::_ops::rand_names_out::call(size, names, out);
22284}
22285namespace symint {
22286 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22287 at::Tensor & rand_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22288 return at::_ops::rand_names_out::call(size, names, out);
22289 }
22290}
22291
22292// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22293inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22294 return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22295}
22296namespace symint {
22297 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22298 at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22299 return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22300 }
22301}
22302
22303// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22304inline at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22305 return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22306}
22307namespace symint {
22308 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22309 at::Tensor & rand_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22310 return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22311 }
22312}
22313
22314// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22315inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22316 return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
22317}
22318namespace symint {
22319 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22320 at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22321 return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
22322 }
22323}
22324
22325// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22326inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22327 return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
22328}
22329namespace symint {
22330 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22331 at::Tensor & rand_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22332 return at::_ops::rand_generator_with_names_out::call(size, generator, names, out);
22333 }
22334}
22335
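// NOTE: usage sketch (illustrative, not part of the generated bindings).
// The generator_with_names overloads thread an explicit RNG through the
// call; pass c10::nullopt to fall back to the default generator:
//
//   at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
//   at::Tensor out = at::empty({2, 2});
//   at::rand_out(out, {2, 2}, gen, c10::nullopt);
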
22336// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22337inline at::Tensor & rand_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
22338 return at::_ops::rand_like_out::call(self, memory_format, out);
22339}
22340// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22341inline at::Tensor & rand_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
22342 return at::_ops::rand_like_out::call(self, memory_format, out);
22343}
22344
22345// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22346inline at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
22347 return at::_ops::randint_like_out::call(self, high, memory_format, out);
22348}
22349// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22350inline at::Tensor & randint_like_outf(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
22351 return at::_ops::randint_like_out::call(self, high, memory_format, out);
22352}
22353
22354// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22355inline at::Tensor & randint_like_out(at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
22356 return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
22357}
22358// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22359inline at::Tensor & randint_like_outf(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
22360 return at::_ops::randint_like_low_dtype_out::call(self, low, high, memory_format, out);
22361}
22362
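// NOTE: usage sketch (illustrative, not part of the generated bindings).
// The two randint_like overloads differ only in the extra `low` bound; the
// two-bound form samples uniformly from [low, high):
//
//   at::randint_like_out(out, self, /*high=*/10);             // values in [0, 10)
//   at::randint_like_out(out, self, /*low=*/5, /*high=*/10);  // values in [5, 10)
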
22363// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22364inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
22365 return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22366}
22367namespace symint {
22368 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22369 at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
22370 return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22371 }
22372}
22373
22374// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22375inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22376 return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22377}
22378namespace symint {
22379 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22380 at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22381 return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out);
22382 }
22383}
22384
22385// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22386inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) {
22387 return at::_ops::randn_names_out::call(size, names, out);
22388}
22389namespace symint {
22390 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22391 at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::DimnameList> names) {
22392 return at::_ops::randn_names_out::call(size, names, out);
22393 }
22394}
22395
22396// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22397inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22398 return at::_ops::randn_names_out::call(size, names, out);
22399}
22400namespace symint {
22401 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22402 at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
22403 return at::_ops::randn_names_out::call(size, names, out);
22404 }
22405}
22406
22407// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22408inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22409 return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22410}
22411namespace symint {
22412 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22413 at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22414 return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22415 }
22416}
22417
22418// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22419inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22420 return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22421}
22422namespace symint {
22423 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22424 at::Tensor & randn_outf(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22425 return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out);
22426 }
22427}
22428
22429// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22430inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22431 return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
22432}
22433namespace symint {
22434 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22435 at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
22436 return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
22437 }
22438}
22439
22440// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
22441inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22442 return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
22443}
22444namespace symint {
22445 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22446 at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
22447 return at::_ops::randn_generator_with_names_out::call(size, generator, names, out);
22448 }
22449}
22450
22451// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22452inline at::Tensor & randn_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
22453 return at::_ops::randn_like_out::call(self, memory_format, out);
22454}
22455// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
22456inline at::Tensor & randn_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
22457 return at::_ops::randn_like_out::call(self, memory_format, out);
22458}
22459
22460// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
22461inline at::Tensor & repeat_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef repeats) {
22462 return at::_ops::repeat_out::call(self, c10::fromIntArrayRefSlow(repeats), out);
22463}
22464namespace symint {
22465 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22466 at::Tensor & repeat_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef repeats) {
22467 return at::_ops::repeat_out::call(self, c10::fromIntArrayRefSlow(repeats), out);
22468 }
22469}
22470
22471// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
22472inline at::Tensor & repeat_outf(const at::Tensor & self, at::IntArrayRef repeats, at::Tensor & out) {
22473 return at::_ops::repeat_out::call(self, c10::fromIntArrayRefSlow(repeats), out);
22474}
22475namespace symint {
22476 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22477 at::Tensor & repeat_outf(const at::Tensor & self, at::IntArrayRef repeats, at::Tensor & out) {
22478 return at::_ops::repeat_out::call(self, c10::fromIntArrayRefSlow(repeats), out);
22479 }
22480}
22481
22482// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
22483inline at::Tensor & repeat_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef repeats) {
22484 return at::_ops::repeat_out::call(self, repeats, out);
22485}
22486namespace symint {
22487 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22488 at::Tensor & repeat_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef repeats) {
22489 return at::_ops::repeat_out::call(self, repeats, out);
22490 }
22491}
22492
22493// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
22494inline at::Tensor & repeat_symint_outf(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
22495 return at::_ops::repeat_out::call(self, repeats, out);
22496}
22497namespace symint {
22498 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22499 at::Tensor & repeat_outf(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
22500 return at::_ops::repeat_out::call(self, repeats, out);
22501 }
22502}
22503
22504// aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!)
22505inline at::Tensor & repeat_interleave_out(at::Tensor & out, const at::Tensor & repeats, c10::optional<int64_t> output_size=c10::nullopt) {
22506 return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size, out);
22507}
22508// aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!)
22509inline at::Tensor & repeat_interleave_outf(const at::Tensor & repeats, c10::optional<int64_t> output_size, at::Tensor & out) {
22510 return at::_ops::repeat_interleave_Tensor_out::call(repeats, output_size, out);
22511}
22512
22513// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
22514inline at::Tensor & _mkldnn_reshape_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shape) {
22515 return at::_ops::_mkldnn_reshape_out::call(self, shape, out);
22516}
22517// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
22518inline at::Tensor & _mkldnn_reshape_outf(const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
22519 return at::_ops::_mkldnn_reshape_out::call(self, shape, out);
22520}
22521
22522// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
22523inline at::Tensor & relu_out(at::Tensor & out, const at::Tensor & self) {
22524 return at::_ops::relu_out::call(self, out);
22525}
22526// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
22527inline at::Tensor & relu_outf(const at::Tensor & self, at::Tensor & out) {
22528 return at::_ops::relu_out::call(self, out);
22529}
22530
22531// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22532inline at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
22533 return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
22534}
22535namespace symint {
22536 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22537 at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
22538 return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
22539 }
22540}
22541
22542// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22543inline at::Tensor & select_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) {
22544 return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
22545}
22546namespace symint {
22547 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22548 at::Tensor & select_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) {
22549 return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
22550 }
22551}
22552
22553// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22554inline at::Tensor & select_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
22555 return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
22556}
22557namespace symint {
22558 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22559 at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
22560 return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
22561 }
22562}
22563
22564// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22565inline at::Tensor & select_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
22566 return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
22567}
22568namespace symint {
22569 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22570 at::Tensor & select_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
22571 return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
22572 }
22573}
22574
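// NOTE: The `at::symint::` overloads are not distinguished by their argument
// types alone; callers select the int64_t or SymInt flavour with an explicit
// template argument. A hedged sketch (tensor names are illustrative):
//
//   at::Tensor grad = at::randn({4});                    // input with dim 0 removed
//   at::Tensor out = at::empty({3, 4}, grad.options());
//   at::symint::select_backward_out<int64_t>(out, grad, {3, 4}, /*dim=*/0, /*index=*/1);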
22575// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
22576inline at::Tensor & celu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1.0) {
22577 return at::_ops::celu_out::call(self, alpha, out);
22578}
22579// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
22580inline at::Tensor & celu_outf(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {
22581 return at::_ops::celu_out::call(self, alpha, out);
22582}
22583
22584// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
22585inline at::Tensor & slice_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
22586 return at::_ops::slice_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
22587}
22588namespace symint {
22589 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22590 at::Tensor & slice_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
22591 return at::_ops::slice_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
22592 }
22593}
22594
22595// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
22596inline at::Tensor & slice_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step, at::Tensor & out) {
22597 return at::_ops::slice_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
22598}
22599namespace symint {
22600 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22601 at::Tensor & slice_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step, at::Tensor & out) {
22602 return at::_ops::slice_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, start, end, step, out);
22603 }
22604}
22605
22606// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
22607inline at::Tensor & slice_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
22608 return at::_ops::slice_backward_out::call(grad_output, input_sizes, dim, start, end, step, out);
22609}
22610namespace symint {
22611 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22612 at::Tensor & slice_backward_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
22613 return at::_ops::slice_backward_out::call(grad_output, input_sizes, dim, start, end, step, out);
22614 }
22615}
22616
22617// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
22618inline at::Tensor & slice_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
22619 return at::_ops::slice_backward_out::call(grad_output, input_sizes, dim, start, end, step, out);
22620}
22621namespace symint {
22622 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22623 at::Tensor & slice_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
22624 return at::_ops::slice_backward_out::call(grad_output, input_sizes, dim, start, end, step, out);
22625 }
22626}
22627
22628// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
22629inline at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
22630 return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
22631}
22632namespace symint {
22633 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22634 at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
22635 return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
22636 }
22637}
22638
22639// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
22640inline at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
22641 return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
22642}
22643namespace symint {
22644 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22645 at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
22646 return at::_ops::slice_scatter_out::call(self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
22647 }
22648}
22649
22650// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
22651inline at::Tensor & slice_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
22652 return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out);
22653}
22654namespace symint {
22655 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22656 at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
22657 return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out);
22658 }
22659}
22660
22661// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
22662inline at::Tensor & slice_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
22663 return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out);
22664}
22665namespace symint {
22666 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22667 at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
22668 return at::_ops::slice_scatter_out::call(self, src, dim, start, end, step, out);
22669 }
22670}
22671
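// NOTE: `slice_scatter` embeds `src` into the slice `self.slice(dim, start,
// end, step)` of a copy of `self`, so `src` must match the slice's shape.
// An illustrative sketch of the out variant:
//
//   at::Tensor base = at::zeros({8});
//   at::Tensor src = at::ones({4});         // slice 0:8:2 has 4 elements
//   at::Tensor out = at::empty_like(base);
//   at::slice_scatter_out(out, base, src, /*dim=*/0, /*start=*/0, /*end=*/8, /*step=*/2);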
22672// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22673inline at::Tensor & select_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
22674 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22675}
22676namespace symint {
22677 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22678 at::Tensor & select_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
22679 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22680 }
22681}
22682
22683// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22684inline at::Tensor & select_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index, at::Tensor & out) {
22685 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22686}
22687namespace symint {
22688 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22689 at::Tensor & select_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index, at::Tensor & out) {
22690 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22691 }
22692}
22693
22694// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22695inline at::Tensor & select_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
22696 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22697}
22698namespace symint {
22699 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22700 at::Tensor & select_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
22701 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22702 }
22703}
22704
22705// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
22706inline at::Tensor & select_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
22707 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22708}
22709namespace symint {
22710 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22711 at::Tensor & select_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
22712 return at::_ops::select_scatter_out::call(self, src, dim, index, out);
22713 }
22714}
22715
22716// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
22717inline at::Tensor & diagonal_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
22718 return at::_ops::diagonal_scatter_out::call(self, src, offset, dim1, dim2, out);
22719}
22720// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
22721inline at::Tensor & diagonal_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
22722 return at::_ops::diagonal_scatter_out::call(self, src, offset, dim1, dim2, out);
22723}
22724
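// NOTE: `diagonal_scatter` writes `src` along the selected diagonal of a copy
// of `self`; `src` must have the diagonal's shape. Illustrative sketch:
//
//   at::Tensor base = at::zeros({3, 3});
//   at::Tensor diag = at::ones({3});
//   at::Tensor out = at::empty_like(base);
//   at::diagonal_scatter_out(out, base, diag);   // offset=0, dim1=0, dim2=1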
22725// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
22726inline at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
22727 return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
22728}
22729namespace symint {
22730 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22731 at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
22732 return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
22733 }
22734}
22735
22736// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
22737inline at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
22738 return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
22739}
22740namespace symint {
22741 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22742 at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
22743 return at::_ops::as_strided_scatter_out::call(self, src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
22744 }
22745}
22746
22747// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
22748inline at::Tensor & as_strided_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
22749 return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
22750}
22751namespace symint {
22752 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22753 at::Tensor & as_strided_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
22754 return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
22755 }
22756}
22757
22758// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
22759inline at::Tensor & as_strided_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
22760 return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
22761}
22762namespace symint {
22763 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22764 at::Tensor & as_strided_scatter_outf(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
22765 return at::_ops::as_strided_scatter_out::call(self, src, size, stride, storage_offset, out);
22766 }
22767}
22768
22769// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
22770inline void unsafe_split_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
22771 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22772}
22773namespace symint {
22774 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22775 void unsafe_split_out(at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
22776 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22777 }
22778}
22779
22780// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
22781inline void unsafe_split_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
22782 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22783}
22784namespace symint {
22785 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22786 void unsafe_split_outf(const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
22787 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22788 }
22789}
22790
22791// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
22792inline void unsafe_split_symint_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
22793 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22794}
22795namespace symint {
22796 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22797 void unsafe_split_out(at::TensorList out, const at::Tensor & self, c10::SymInt split_size, int64_t dim=0) {
22798 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22799 }
22800}
22801
22802// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
22803inline void unsafe_split_symint_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
22804 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22805}
22806namespace symint {
22807 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22808 void unsafe_split_outf(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
22809 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
22810 }
22811}
22812
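// NOTE: The split out-variants return void and write their chunks into a
// caller-provided `at::TensorList`. A hedged sketch, assuming three
// pre-allocated chunks of the right size:
//
//   at::Tensor x = at::arange(6);
//   std::vector<at::Tensor> chunks = {at::empty({2}, x.options()),
//                                     at::empty({2}, x.options()),
//                                     at::empty({2}, x.options())};
//   at::unsafe_split_out(chunks, x, /*split_size=*/2);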
22813// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
22814inline void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
22815 return at::_ops::unsafe_split_with_sizes_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
22816}
22817namespace symint {
22818 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22819 void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
22820 return at::_ops::unsafe_split_with_sizes_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
22821 }
22822}
22823
22824// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
22825inline void unsafe_split_with_sizes_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
22826 return at::_ops::unsafe_split_with_sizes_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
22827}
22828namespace symint {
22829 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
22830 void unsafe_split_with_sizes_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
22831 return at::_ops::unsafe_split_with_sizes_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
22832 }
22833}
22834
22835// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
22836inline void unsafe_split_with_sizes_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
22837 return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
22838}
22839namespace symint {
22840 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22841 void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
22842 return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
22843 }
22844}
22845
22846// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
22847inline void unsafe_split_with_sizes_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
22848 return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
22849}
22850namespace symint {
22851 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
22852 void unsafe_split_with_sizes_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
22853 return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
22854 }
22855}
22856
22857// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
22858inline at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
22859 return at::_ops::sum_out::call(self, dtype, out);
22860}
22861// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
22862inline at::Tensor & sum_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
22863 return at::_ops::sum_out::call(self, dtype, out);
22864}
22865
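// NOTE: This `sum.out` overload reduces over all elements, so `out` is 0-dim;
// the `dtype` argument selects a (typically wider) accumulation type.
// Illustrative sketch:
//
//   at::Tensor x = at::ones({2, 3});
//   at::Tensor acc = at::empty({}, x.options().dtype(at::kDouble));
//   at::sum_out(acc, x, at::kDouble);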
22866// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22867inline ::std::tuple<at::Tensor &,at::Tensor &> std_mean_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
22868 return at::_ops::std_mean_correction_out::call(self, dim, correction, keepdim, out0, out1);
22869}
22870// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22871inline ::std::tuple<at::Tensor &,at::Tensor &> std_mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
22872 return at::_ops::std_mean_correction_out::call(self, dim, correction, keepdim, out0, out1);
22873}
22874
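// NOTE: `std_mean_out` fills two outputs in one pass: std into `out0`, mean
// into `out1`. A hedged sketch reducing over dim 1 with Bessel's correction:
//
//   at::Tensor x = at::randn({4, 5});
//   at::Tensor s = at::empty({4});
//   at::Tensor m = at::empty({4});
//   at::std_mean_out(s, m, x, /*dim=*/{1}, /*correction=*/1, /*keepdim=*/false);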
22875// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
22876inline at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
22877 return at::_ops::prod_out::call(self, dtype, out);
22878}
22879// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
22880inline at::Tensor & prod_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
22881 return at::_ops::prod_out::call(self, dtype, out);
22882}
22883
22884// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
22885inline at::Tensor & _mkldnn_transpose_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
22886 return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
22887}
22888// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
22889inline at::Tensor & _mkldnn_transpose_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
22890 return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
22891}
22892
22893// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
22894inline at::Tensor & flip_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) {
22895 return at::_ops::flip_out::call(self, dims, out);
22896}
22897// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
22898inline at::Tensor & flip_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
22899 return at::_ops::flip_out::call(self, dims, out);
22900}
22901
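// NOTE: `flip_out` reverses the listed dimensions of `self` into `out`.
// Illustrative sketch:
//
//   at::Tensor x = at::arange(4).reshape({2, 2});
//   at::Tensor out = at::empty_like(x);
//   at::flip_out(out, x, /*dims=*/{0});   // reverse the rows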
22902// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
22903inline at::Tensor & roll_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) {
22904 return at::_ops::roll_out::call(self, shifts, dims, out);
22905}
22906// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
22907inline at::Tensor & roll_outf(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {
22908 return at::_ops::roll_out::call(self, shifts, dims, out);
22909}
22910
22911// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
22912inline at::Tensor & rot90_out(at::Tensor & out, const at::Tensor & self, int64_t k=1, at::IntArrayRef dims={0,1}) {
22913 return at::_ops::rot90_out::call(self, k, dims, out);
22914}
22915// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
22916inline at::Tensor & rot90_outf(const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
22917 return at::_ops::rot90_out::call(self, k, dims, out);
22918}
22919
22920// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
22921inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
22922 return at::_ops::_transform_bias_rescale_qkv_out::call(qkv, qkv_bias, num_heads, out0, out1, out2);
22923}
22924// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
22925inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_outf(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
22926 return at::_ops::_transform_bias_rescale_qkv_out::call(qkv, qkv_bias, num_heads, out0, out1, out2);
22927}
22928
22929// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
22930inline at::Tensor & _nested_tensor_from_mask_out(at::Tensor & out, const at::Tensor & t, const at::Tensor & mask, bool mask_check=true) {
22931 return at::_ops::_nested_tensor_from_mask_out::call(t, mask, mask_check, out);
22932}
22933// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)
22934inline at::Tensor & _nested_tensor_from_mask_outf(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
22935 return at::_ops::_nested_tensor_from_mask_out::call(t, mask, mask_check, out);
22936}
22937
22938// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
22939inline at::Tensor & _nested_from_padded_out(at::Tensor & out, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) {
22940 return at::_ops::_nested_from_padded_out::call(padded, cpu_nested_shape_example, fuse_transform_0213, out);
22941}
22942// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)
22943inline at::Tensor & _nested_from_padded_outf(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
22944 return at::_ops::_nested_from_padded_out::call(padded, cpu_nested_shape_example, fuse_transform_0213, out);
22945}
22946
22947// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
22948inline at::Tensor & _nested_tensor_size_out(at::Tensor & out, const at::Tensor & self) {
22949 return at::_ops::_nested_tensor_size_out::call(self, out);
22950}
22951// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
22952inline at::Tensor & _nested_tensor_size_outf(const at::Tensor & self, at::Tensor & out) {
22953 return at::_ops::_nested_tensor_size_out::call(self, out);
22954}
22955
22956// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
22957inline at::Tensor & _nested_tensor_strides_out(at::Tensor & out, const at::Tensor & self) {
22958 return at::_ops::_nested_tensor_strides_out::call(self, out);
22959}
22960// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
22961inline at::Tensor & _nested_tensor_strides_outf(const at::Tensor & self, at::Tensor & out) {
22962 return at::_ops::_nested_tensor_strides_out::call(self, out);
22963}
22964
22965// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
22966inline at::Tensor & _nested_from_padded_and_nested_example_out(at::Tensor & out, const at::Tensor & padded, const at::Tensor & nt_example) {
22967 return at::_ops::_nested_from_padded_and_nested_example_out::call(padded, nt_example, out);
22968}
22969// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)
22970inline at::Tensor & _nested_from_padded_and_nested_example_outf(const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {
22971 return at::_ops::_nested_from_padded_and_nested_example_out::call(padded, nt_example, out);
22972}
22973
22974// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)
22975inline at::Tensor & _nested_view_from_buffer_copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
22976 return at::_ops::_nested_view_from_buffer_copy_out::call(self, nested_size, nested_strides, offsets, out);
22977}
22978// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)
22979inline at::Tensor & _nested_view_from_buffer_copy_outf(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) {
22980 return at::_ops::_nested_view_from_buffer_copy_out::call(self, nested_size, nested_strides, offsets, out);
22981}
22982
22983// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
22984inline at::Tensor & _trilinear_out(at::Tensor & out, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1) {
22985 return at::_ops::_trilinear_out::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
22986}
22987// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)
22988inline at::Tensor & _trilinear_outf(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {
22989 return at::_ops::_trilinear_out::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out);
22990}
22991
22992// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22993inline ::std::tuple<at::Tensor &,at::Tensor &> _unique_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, bool sorted=true, bool return_inverse=false) {
22994 return at::_ops::_unique_out::call(self, sorted, return_inverse, out0, out1);
22995}
22996// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
22997inline ::std::tuple<at::Tensor &,at::Tensor &> _unique_outf(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {
22998 return at::_ops::_unique_out::call(self, sorted, return_inverse, out0, out1);
22999}
23000
23001// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23002inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
23003 return at::_ops::unique_dim_out::call(self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
23004}
23005// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23006inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_outf(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
23007 return at::_ops::unique_dim_out::call(self, dim, sorted, return_inverse, return_counts, out0, out1, out2);
23008}
23009
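// NOTE: The unique out-variants have data-dependent output shapes, so the
// kernel may resize the supplied tensors; empty tensors of the expected
// dtypes are a safe starting point. A hedged sketch:
//
//   at::Tensor x = at::tensor({1, 1, 2}, at::kLong);
//   at::Tensor vals = at::empty({0}, x.options());
//   at::Tensor inverse = at::empty({0}, x.options());
//   at::Tensor counts = at::empty({0}, x.options());
//   at::unique_dim_out(vals, inverse, counts, x, /*dim=*/0);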
23010// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23011inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional<int64_t> dim=c10::nullopt) {
23012 return at::_ops::unique_consecutive_out::call(self, return_inverse, return_counts, dim, out0, out1, out2);
23013}
23014// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23015inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_outf(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
23016 return at::_ops::unique_consecutive_out::call(self, return_inverse, return_counts, dim, out0, out1, out2);
23017}
23018
23019// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23020inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false) {
23021 return at::_ops::unique_dim_consecutive_out::call(self, dim, return_inverse, return_counts, out0, out1, out2);
23022}
23023// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23024inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_outf(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
23025 return at::_ops::unique_dim_consecutive_out::call(self, dim, return_inverse, return_counts, out0, out1, out2);
23026}
23027
23028// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23029inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
23030 return at::_ops::_unique2_out::call(self, sorted, return_inverse, return_counts, out0, out1, out2);
23031}
23032// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23033inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_outf(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
23034 return at::_ops::_unique2_out::call(self, sorted, return_inverse, return_counts, out0, out1, out2);
23035}
23036
23037// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
23038inline at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
23039 return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
23040}
23041namespace symint {
23042 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
23043 at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
23044 return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
23045 }
23046}
23047
23048// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
23049inline at::Tensor & _unsafe_view_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
23050 return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
23051}
23052namespace symint {
23053 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
23054 at::Tensor & _unsafe_view_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
23055 return at::_ops::_unsafe_view_out::call(self, c10::fromIntArrayRefSlow(size), out);
23056 }
23057}
23058
23059// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
23060inline at::Tensor & _unsafe_view_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
23061 return at::_ops::_unsafe_view_out::call(self, size, out);
23062}
23063namespace symint {
23064 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
23065 at::Tensor & _unsafe_view_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
23066 return at::_ops::_unsafe_view_out::call(self, size, out);
23067 }
23068}
23069
23070// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
23071inline at::Tensor & _unsafe_view_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
23072 return at::_ops::_unsafe_view_out::call(self, size, out);
23073}
23074namespace symint {
23075 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
23076 at::Tensor & _unsafe_view_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
23077 return at::_ops::_unsafe_view_out::call(self, size, out);
23078 }
23079}
23080
23081// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23082inline ::std::tuple<at::Tensor &,at::Tensor &> var_mean_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<int64_t> correction=c10::nullopt, bool keepdim=false) {
23083 return at::_ops::var_mean_correction_out::call(self, dim, correction, keepdim, out0, out1);
23084}
23085// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23086inline ::std::tuple<at::Tensor &,at::Tensor &> var_mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
23087 return at::_ops::var_mean_correction_out::call(self, dim, correction, keepdim, out0, out1);
23088}
23089
23090// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23091inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) {
23092 return at::_ops::_weight_norm_interface_out::call(v, g, dim, out0, out1);
23093}
23094// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23095inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_outf(const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
23096 return at::_ops::_weight_norm_interface_out::call(v, g, dim, out0, out1);
23097}
23098
23099// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23100inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
23101 return at::_ops::_weight_norm_interface_backward_out::call(grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
23102}
23103// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23104inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_outf(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
23105 return at::_ops::_weight_norm_interface_backward_out::call(grad_w, saved_v, saved_g, saved_norms, dim, out0, out1);
23106}
23107
23108// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
23109inline at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
23110 return at::_ops::zeros_names_out::call(size, names, out);
23111}
23112// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
23113inline at::Tensor & zeros_outf(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
23114 return at::_ops::zeros_names_out::call(size, names, out);
23115}
23116
23117// aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
23118inline at::Tensor & _efficientzerotensor_out(at::Tensor & out, at::IntArrayRef size) {
23119 return at::_ops::_efficientzerotensor_out::call(size, out);
23120}
23121// aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
23122inline at::Tensor & _efficientzerotensor_outf(at::IntArrayRef size, at::Tensor & out) {
23123 return at::_ops::_efficientzerotensor_out::call(size, out);
23124}
23125
23126// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23127inline at::Tensor & zeros_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
23128 return at::_ops::zeros_like_out::call(self, memory_format, out);
23129}
23130// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23131inline at::Tensor & zeros_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
23132 return at::_ops::zeros_like_out::call(self, memory_format, out);
23133}
23134
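// NOTE: `zeros_like_out` uses `self` only for shape; the fill goes into
// `out`, and `memory_format` defaults to nullopt. Illustrative sketch:
//
//   at::Tensor src = at::randn({2, 2});
//   at::Tensor out = at::empty_like(src);
//   at::zeros_like_out(out, src);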
23135// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
23136inline at::Tensor & _standard_gamma_grad_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & output) {
23137 return at::_ops::_standard_gamma_grad_out::call(self, output, out);
23138}
23139// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
23140inline at::Tensor & _standard_gamma_grad_outf(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
23141 return at::_ops::_standard_gamma_grad_out::call(self, output, out);
23142}
23143
23144// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23145inline at::Tensor & _standard_gamma_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
23146 return at::_ops::_standard_gamma_out::call(self, generator, out);
23147}
23148// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23149inline at::Tensor & _standard_gamma_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
23150 return at::_ops::_standard_gamma_out::call(self, generator, out);
23151}
23152
23153// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
23154inline at::Tensor & _dirichlet_grad_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
23155 return at::_ops::_dirichlet_grad_out::call(x, alpha, total, out);
23156}
23157// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
23158inline at::Tensor & _dirichlet_grad_outf(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {
23159 return at::_ops::_dirichlet_grad_out::call(x, alpha, total, out);
23160}
23161
23162// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23163inline at::Tensor & _sample_dirichlet_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
23164 return at::_ops::_sample_dirichlet_out::call(self, generator, out);
23165}
23166// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23167inline at::Tensor & _sample_dirichlet_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
23168 return at::_ops::_sample_dirichlet_out::call(self, generator, out);
23169}
23170
23171// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23172inline at::Tensor & poisson_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
23173 return at::_ops::poisson_out::call(self, generator, out);
23174}
23175// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23176inline at::Tensor & poisson_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
23177 return at::_ops::poisson_out::call(self, generator, out);
23178}
23179
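// NOTE: The sampling out-variants take an optional `at::Generator` for
// reproducibility; omitting it uses the default generator. Illustrative
// sketch:
//
//   at::Tensor rate = at::full({3}, 4.0);   // Poisson rates
//   at::Tensor out = at::empty_like(rate);
//   at::poisson_out(out, rate);             // generator defaults to c10::nullopt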
23180// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23181inline at::Tensor & binomial_out(at::Tensor & out, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt) {
23182 return at::_ops::binomial_out::call(count, prob, generator, out);
23183}
23184// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
23185inline at::Tensor & binomial_outf(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) {
23186 return at::_ops::binomial_out::call(count, prob, generator, out);
23187}
23188
23189// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
23190inline at::Tensor & native_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2) {
23191 return at::_ops::native_norm_out::call(self, p, out);
23192}
23193// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
23194inline at::Tensor & native_norm_outf(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
23195 return at::_ops::native_norm_out::call(self, p, out);
23196}
23197
23198// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
23199inline at::Tensor & native_norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
23200 return at::_ops::native_norm_ScalarOpt_dim_dtype_out::call(self, p, dim, keepdim, dtype, out);
23201}
23202// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
23203inline at::Tensor & native_norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
23204 return at::_ops::native_norm_ScalarOpt_dim_dtype_out::call(self, p, dim, keepdim, dtype, out);
23205}
23206
23207// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
23208inline at::Tensor & _sparse_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
23209 return at::_ops::_sparse_sum_dim_out::call(self, dim, out);
23210}
23211// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)
23212inline at::Tensor & _sparse_sum_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
23213 return at::_ops::_sparse_sum_dim_out::call(self, dim, out);
23214}
23215
23216// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
23217inline at::Tensor & _sparse_sum_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
23218 return at::_ops::_sparse_sum_backward_out::call(grad, self, dim, out);
23219}
23220// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
23221inline at::Tensor & _sparse_sum_backward_outf(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
23222 return at::_ops::_sparse_sum_backward_out::call(grad, self, dim, out);
23223}
23224
23225// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
23226inline at::Tensor & _sparse_csr_sum_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
23227 return at::_ops::_sparse_csr_sum_dim_dtype_out::call(self, dim, keepdim, dtype, out);
23228}
23229// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
23230inline at::Tensor & _sparse_csr_sum_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
23231 return at::_ops::_sparse_csr_sum_dim_dtype_out::call(self, dim, keepdim, dtype, out);
23232}
23233
23234// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
23235inline at::Tensor & _sparse_csr_prod_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
23236 return at::_ops::_sparse_csr_prod_dim_dtype_out::call(self, dim, keepdim, dtype, out);
23237}
23238// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
23239inline at::Tensor & _sparse_csr_prod_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
23240 return at::_ops::_sparse_csr_prod_dim_dtype_out::call(self, dim, keepdim, dtype, out);
23241}
23242
23243// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
23244inline at::Tensor & _sparse_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
23245 return at::_ops::_sparse_softmax_out::call(self, dim, half_to_float, out);
23246}
23247// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
23248inline at::Tensor & _sparse_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
23249 return at::_ops::_sparse_softmax_out::call(self, dim, half_to_float, out);
23250}
23251
23252// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23253inline at::Tensor & _sparse_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
23254 return at::_ops::_sparse_softmax_backward_data_out::call(grad_output, output, dim, self, out);
23255}
23256// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23257inline at::Tensor & _sparse_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
23258 return at::_ops::_sparse_softmax_backward_data_out::call(grad_output, output, dim, self, out);
23259}
23260
23261// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
23262inline at::Tensor & _sparse_log_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
23263 return at::_ops::_sparse_log_softmax_out::call(self, dim, half_to_float, out);
23264}
23265// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
23266inline at::Tensor & _sparse_log_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
23267 return at::_ops::_sparse_log_softmax_out::call(self, dim, half_to_float, out);
23268}
23269
23270// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23271inline at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
23272 return at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output, output, dim, self, out);
23273}
23274// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23275inline at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
23276 return at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output, output, dim, self, out);
23277}
23278
23279// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
23280inline at::Tensor & _spdiags_out(at::Tensor & out, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout=c10::nullopt) {
23281 return at::_ops::_spdiags_out::call(diagonals, offsets, shape, layout, out);
23282}
23283// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)
23284inline at::Tensor & _spdiags_outf(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) {
23285 return at::_ops::_spdiags_out::call(diagonals, offsets, shape, layout, out);
23286}
23287
23288// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
23289inline at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
23290 return at::_ops::norm_ScalarOpt_dtype_out::call(self, p, dtype, out);
23291}
23292// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
23293inline at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
23294 return at::_ops::norm_ScalarOpt_dtype_out::call(self, p, dtype, out);
23295}
23296
23297// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
23298inline at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2) {
23299 return at::_ops::norm_Scalar_out::call(self, p, out);
23300}
23301// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
23302inline at::Tensor & norm_outf(const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
23303 return at::_ops::norm_Scalar_out::call(self, p, out);
23304}
23305
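// [Editorial note -- not emitted by torchgen] `norm_out` is overloaded: a
// c10::optional<at::Scalar> together with an explicit at::ScalarType selects
// the ScalarOpt_dtype_out schema, while a plain Scalar `p` selects Scalar_out.
// A hedged sketch, assuming the usual out= dtype checks pass:
//
//   at::Tensor x = at::rand({3, 3});
//   at::Tensor out1 = at::empty({});
//   at::norm_out(out1, x, /*p=*/2);                                   // Scalar_out
//   at::Tensor out2 = at::empty({}, at::kDouble);
//   at::norm_out(out2, x, c10::optional<at::Scalar>(2), at::kDouble); // ScalarOpt_dtype_out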
23306// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23307inline at::Tensor & clone_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
23308 return at::_ops::clone_out::call(self, memory_format, out);
23309}
23310// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23311inline at::Tensor & clone_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
23312 return at::_ops::clone_out::call(self, memory_format, out);
23313}
23314
23315// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23316inline const at::Tensor & resize_as_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
23317 return at::_ops::resize_as_out::call(self, the_template, memory_format, out);
23318}
23319// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23320inline const at::Tensor & resize_as_outf(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
23321 return at::_ops::resize_as_out::call(self, the_template, memory_format, out);
23322}
23323
23324// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
23325inline at::Tensor resize_as(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
23326 return at::_ops::resize_as::call(self, the_template, memory_format);
23327}
23328
23329// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
23330inline const at::Tensor & resize_as_sparse_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template) {
23331 return at::_ops::resize_as_sparse_out::call(self, the_template, out);
23332}
23333// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
23334inline const at::Tensor & resize_as_sparse_outf(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) {
23335 return at::_ops::resize_as_sparse_out::call(self, the_template, out);
23336}
23337
23338// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
23339inline at::Tensor resize_as_sparse(const at::Tensor & self, const at::Tensor & the_template) {
23340 return at::_ops::resize_as_sparse::call(self, the_template);
23341}
23342
23343// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23344inline at::Tensor & zero_out(at::Tensor & out, const at::Tensor & self) {
23345 return at::_ops::zero_out::call(self, out);
23346}
23347// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23348inline at::Tensor & zero_outf(const at::Tensor & self, at::Tensor & out) {
23349 return at::_ops::zero_out::call(self, out);
23350}
23351
23352// aten::zero(Tensor self) -> Tensor
23353inline at::Tensor zero(const at::Tensor & self) {
23354 return at::_ops::zero::call(self);
23355}
23356
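// [Editorial usage sketch -- not emitted by torchgen] `zero` illustrates the
// three generated flavors: out-first, out-last, and the functional variant
// (the functionalized counterpart of the in-place `zero_`), which leaves its
// input untouched. Assuming plain CPU tensors:
//
//   at::Tensor src = at::rand({2, 3});
//   at::Tensor dst = at::empty({2, 3});
//   at::zero_out(dst, src);        // fills dst with zeros, returns dst
//   at::zero_outf(src, dst);       // same kernel, out-last spelling
//   at::Tensor z = at::zero(src);  // fresh zero tensor; src is unchanged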
23357// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
23358inline at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
23359 return at::_ops::sub_Scalar_out::call(self, other, alpha, out);
23360}
23361// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
23362inline at::Tensor & sub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
23363 return at::_ops::sub_Scalar_out::call(self, other, alpha, out);
23364}
23365
23366// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
23367inline at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
23368 return at::_ops::rsub_Tensor_out::call(self, other, alpha, out);
23369}
23370// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
23371inline at::Tensor & rsub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
23372 return at::_ops::rsub_Tensor_out::call(self, other, alpha, out);
23373}
23374
23375// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
23376inline at::Tensor & rsub_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
23377 return at::_ops::rsub_Scalar_out::call(self, other, alpha, out);
23378}
23379// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
23380inline at::Tensor & rsub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
23381 return at::_ops::rsub_Scalar_out::call(self, other, alpha, out);
23382}
23383
23384// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
23385inline at::Tensor & _sparse_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
23386 return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
23387}
23388// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
23389inline at::Tensor & _sparse_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
23390 return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
23391}
23392
23393// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
23394inline at::Tensor & sparse_coo_tensor_out(at::Tensor & out, at::IntArrayRef size) {
23395 return at::_ops::sparse_coo_tensor_size_out::call(size, out);
23396}
23397// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
23398inline at::Tensor & sparse_coo_tensor_outf(at::IntArrayRef size, at::Tensor & out) {
23399 return at::_ops::sparse_coo_tensor_size_out::call(size, out);
23400}
23401
23402// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
23403inline at::Tensor & _sparse_coo_tensor_with_dims_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size) {
23404 return at::_ops::_sparse_coo_tensor_with_dims_out::call(sparse_dim, dense_dim, size, out);
23405}
23406// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)
23407inline at::Tensor & _sparse_coo_tensor_with_dims_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
23408 return at::_ops::_sparse_coo_tensor_with_dims_out::call(sparse_dim, dense_dim, size, out);
23409}
23410
23411// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
23412inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values) {
23413 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, out);
23414}
23415namespace symint {
23416 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
23417 at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values) {
23418 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, out);
23419 }
23420}
23421
23422// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
23423inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
23424 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, out);
23425}
23426namespace symint {
23427 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
23428 at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
23429 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, c10::fromIntArrayRefSlow(size), indices, values, out);
23430 }
23431}
23432
23433// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
23434inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values) {
23435 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, out);
23436}
23437namespace symint {
23438 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
23439 at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values) {
23440 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, out);
23441 }
23442}
23443
23444// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
23445inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
23446 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, out);
23447}
23448namespace symint {
23449 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
23450 at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) {
23451 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices, values, out);
23452 }
23453}
23454
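// [Editorial note -- not emitted by torchgen] The at::symint:: templates above
// let generic code pick the int64_t or c10::SymInt overload with an explicit
// template argument instead of spelling out the *_symint_* name; the
// enable_if_t parameter removes the non-matching candidate. A sketch, assuming
// `indices` and `values` already hold valid COO components:
//
//   at::Tensor out = at::empty({0});
//   at::symint::_sparse_coo_tensor_with_dims_and_tensors_out<int64_t>(
//       out, /*sparse_dim=*/1, /*dense_dim=*/0, {3}, indices, values);
//   at::symint::_sparse_coo_tensor_with_dims_and_tensors_out<c10::SymInt>(
//       out, 1, 0, {c10::SymInt(3)}, indices, values);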
23455// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
23456inline const at::Tensor & sparse_resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
23457 return at::_ops::sparse_resize_out::call(self, size, sparse_dim, dense_dim, out);
23458}
23459// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
23460inline const at::Tensor & sparse_resize_outf(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
23461 return at::_ops::sparse_resize_out::call(self, size, sparse_dim, dense_dim, out);
23462}
23463
23464// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
23465inline at::Tensor sparse_resize(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
23466 return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim);
23467}
23468
23469// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
23470inline const at::Tensor & sparse_resize_and_clear_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
23471 return at::_ops::sparse_resize_and_clear_out::call(self, size, sparse_dim, dense_dim, out);
23472}
23473// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)
23474inline const at::Tensor & sparse_resize_and_clear_outf(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
23475 return at::_ops::sparse_resize_and_clear_out::call(self, size, sparse_dim, dense_dim, out);
23476}
23477
23478// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor
23479inline at::Tensor sparse_resize_and_clear(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
23480 return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim);
23481}
23482
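// [Editorial note -- not emitted by torchgen] The resize-style wrappers above
// take `out` as const at::Tensor& rather than at::Tensor&: the underlying
// TensorImpl is still mutated; the const reference only means the Tensor
// handle itself is never reseated. A hedged sketch, assuming sparse COO
// inputs:
//
//   at::Tensor self = at::rand({2, 2}).to_sparse();
//   at::Tensor out = at::empty({0}).to_sparse();
//   at::sparse_resize_out(out, self, /*size=*/{4, 4},
//                         /*sparse_dim=*/2, /*dense_dim=*/0);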
23483// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
23484inline at::Tensor & sparse_mask_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) {
23485 return at::_ops::sparse_mask_out::call(self, mask, out);
23486}
23487// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
23488inline at::Tensor & sparse_mask_outf(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
23489 return at::_ops::sparse_mask_out::call(self, mask, out);
23490}
23491
23492// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
23493inline at::Tensor & _to_dense_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
23494 return at::_ops::_to_dense_out::call(self, dtype, out);
23495}
23496// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
23497inline at::Tensor & _to_dense_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
23498 return at::_ops::_to_dense_out::call(self, dtype, out);
23499}
23500
23501// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23502inline at::Tensor & _coalesce_out(at::Tensor & out, const at::Tensor & self) {
23503 return at::_ops::_coalesce_out::call(self, out);
23504}
23505// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23506inline at::Tensor & _coalesce_outf(const at::Tensor & self, at::Tensor & out) {
23507 return at::_ops::_coalesce_out::call(self, out);
23508}
23509
23510// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
23511inline at::Tensor & _coalesced_out(at::Tensor & out, const at::Tensor & self, bool coalesced) {
23512 return at::_ops::_coalesced_out::call(self, coalesced, out);
23513}
23514// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)
23515inline at::Tensor & _coalesced_outf(const at::Tensor & self, bool coalesced, at::Tensor & out) {
23516 return at::_ops::_coalesced_out::call(self, coalesced, out);
23517}
23518
23519// aten::_coalesced(Tensor self, bool coalesced) -> Tensor
23520inline at::Tensor _coalesced(const at::Tensor & self, bool coalesced) {
23521 return at::_ops::_coalesced::call(self, coalesced);
23522}
23523
23524// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
23525inline at::Tensor & copy_sparse_to_sparse_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
23526 return at::_ops::copy_sparse_to_sparse_out::call(self, src, non_blocking, out);
23527}
23528// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
23529inline at::Tensor & copy_sparse_to_sparse_outf(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
23530 return at::_ops::copy_sparse_to_sparse_out::call(self, src, non_blocking, out);
23531}
23532
23533// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
23534inline at::Tensor copy_sparse_to_sparse(const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
23535 return at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking);
23536}
23537
23538// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
23539inline at::Tensor & to_sparse_out(at::Tensor & out, const at::Tensor & self, int64_t sparse_dim) {
23540 return at::_ops::to_sparse_sparse_dim_out::call(self, sparse_dim, out);
23541}
23542// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
23543inline at::Tensor & to_sparse_outf(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
23544 return at::_ops::to_sparse_sparse_dim_out::call(self, sparse_dim, out);
23545}
23546
23547// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
23548inline at::Tensor & to_sparse_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt) {
23549 return at::_ops::to_sparse_out::call(self, layout, blocksize, dense_dim, out);
23550}
23551// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
23552inline at::Tensor & to_sparse_outf(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
23553 return at::_ops::to_sparse_out::call(self, layout, blocksize, dense_dim, out);
23554}
23555
23556// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23557inline at::Tensor & to_sparse_csr_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) {
23558 return at::_ops::to_sparse_csr_out::call(self, dense_dim, out);
23559}
23560// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23561inline at::Tensor & to_sparse_csr_outf(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {
23562 return at::_ops::to_sparse_csr_out::call(self, dense_dim, out);
23563}
23564
23565// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23566inline at::Tensor & to_sparse_csc_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dense_dim=c10::nullopt) {
23567 return at::_ops::to_sparse_csc_out::call(self, dense_dim, out);
23568}
23569// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23570inline at::Tensor & to_sparse_csc_outf(const at::Tensor & self, c10::optional<int64_t> dense_dim, at::Tensor & out) {
23571 return at::_ops::to_sparse_csc_out::call(self, dense_dim, out);
23572}
23573
23574// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23575inline at::Tensor & to_sparse_bsr_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt) {
23576 return at::_ops::to_sparse_bsr_out::call(self, blocksize, dense_dim, out);
23577}
23578// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23579inline at::Tensor & to_sparse_bsr_outf(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
23580 return at::_ops::to_sparse_bsr_out::call(self, blocksize, dense_dim, out);
23581}
23582
23583// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23584inline at::Tensor & to_sparse_bsc_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim=c10::nullopt) {
23585 return at::_ops::to_sparse_bsc_out::call(self, blocksize, dense_dim, out);
23586}
23587// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
23588inline at::Tensor & to_sparse_bsc_outf(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
23589 return at::_ops::to_sparse_bsc_out::call(self, blocksize, dense_dim, out);
23590}
23591
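// [Editorial usage sketch -- not emitted by torchgen] The BSR/BSC conversions
// take a mandatory two-element blocksize that must tile the matrix exactly.
// A hedged sketch (2 divides 4, 3 divides 6):
//
//   at::Tensor dense = at::rand({4, 6});
//   at::Tensor out = at::empty({0});
//   at::to_sparse_bsr_out(out, dense, /*blocksize=*/{2, 3});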
23592// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
23593inline at::Tensor & to_mkldnn_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
23594 return at::_ops::to_mkldnn_out::call(self, dtype, out);
23595}
23596// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
23597inline at::Tensor & to_mkldnn_outf(const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
23598 return at::_ops::to_mkldnn_out::call(self, dtype, out);
23599}
23600
23601// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
23602inline at::Tensor & mkldnn_reorder_conv2d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt) {
23603 return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, padding, stride, dilation, groups, input_size, out);
23604}
23605// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
23606inline at::Tensor & mkldnn_reorder_conv2d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) {
23607 return at::_ops::mkldnn_reorder_conv2d_weight_out::call(self, padding, stride, dilation, groups, input_size, out);
23608}
23609
23610// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)
23611inline at::Tensor & mkldnn_reorder_conv3d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) {
23612 return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, padding, stride, dilation, groups, out);
23613}
23614// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)
23615inline at::Tensor & mkldnn_reorder_conv3d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
23616 return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, padding, stride, dilation, groups, out);
23617}
23618
23619// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
23620inline at::Tensor & quantize_per_tensor_dynamic_out(at::Tensor & out, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
23621 return at::_ops::quantize_per_tensor_dynamic_out::call(self, dtype, reduce_range, out);
23622}
23623// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)
23624inline at::Tensor & quantize_per_tensor_dynamic_outf(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) {
23625 return at::_ops::quantize_per_tensor_dynamic_out::call(self, dtype, reduce_range, out);
23626}
23627
23628// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
23629inline at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
23630 return at::_ops::quantize_per_tensor_out::call(self, scale, zero_point, dtype, out);
23631}
23632// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
23633inline at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) {
23634 return at::_ops::quantize_per_tensor_out::call(self, scale, zero_point, dtype, out);
23635}
23636
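// [Editorial usage sketch -- not emitted by torchgen] quantize_per_tensor maps
// float values to a quantized dtype, roughly q = round(x / scale) + zero_point
// clamped to the dtype's range. A hedged sketch:
//
//   at::Tensor x = at::rand({2, 2});
//   at::Tensor out = at::empty({0});
//   at::quantize_per_tensor_out(out, x, /*scale=*/0.1, /*zero_point=*/10,
//                               at::kQUInt8);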
23637// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
23638inline at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
23639 return at::_ops::quantize_per_tensor_tensor_qparams_out::call(self, scale, zero_point, dtype, out);
23640}
23641// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
23642inline at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) {
23643 return at::_ops::quantize_per_tensor_tensor_qparams_out::call(self, scale, zero_point, dtype, out);
23644}
23645
23646// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
23647inline void quantize_per_tensor_out(at::TensorList out, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
23648 return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out);
23649}
23650// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()
23651inline void quantize_per_tensor_outf(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
23652 return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out);
23653}
23654
23655// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
23656inline at::Tensor & quantize_per_channel_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
23657 return at::_ops::quantize_per_channel_out::call(self, scales, zero_points, axis, dtype, out);
23658}
23659// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
23660inline at::Tensor & quantize_per_channel_outf(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) {
23661 return at::_ops::quantize_per_channel_out::call(self, scales, zero_points, axis, dtype, out);
23662}
23663
23664// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23665inline at::Tensor & dequantize_out(at::Tensor & out, const at::Tensor & self) {
23666 return at::_ops::dequantize_self_out::call(self, out);
23667}
23668// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23669inline at::Tensor & dequantize_outf(const at::Tensor & self, at::Tensor & out) {
23670 return at::_ops::dequantize_self_out::call(self, out);
23671}
23672
23673// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
23674inline void dequantize_out(at::TensorList out, at::TensorList tensors) {
23675 return at::_ops::dequantize_tensors_out::call(tensors, out);
23676}
23677// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
23678inline void dequantize_outf(at::TensorList tensors, at::TensorList out) {
23679 return at::_ops::dequantize_tensors_out::call(tensors, out);
23680}
23681
23682// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23683inline at::Tensor & q_per_channel_scales_out(at::Tensor & out, const at::Tensor & self) {
23684 return at::_ops::q_per_channel_scales_out::call(self, out);
23685}
23686// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23687inline at::Tensor & q_per_channel_scales_outf(const at::Tensor & self, at::Tensor & out) {
23688 return at::_ops::q_per_channel_scales_out::call(self, out);
23689}
23690
23691// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23692inline at::Tensor & q_per_channel_zero_points_out(at::Tensor & out, const at::Tensor & self) {
23693 return at::_ops::q_per_channel_zero_points_out::call(self, out);
23694}
23695// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23696inline at::Tensor & q_per_channel_zero_points_outf(const at::Tensor & self, at::Tensor & out) {
23697 return at::_ops::q_per_channel_zero_points_out::call(self, out);
23698}
23699
23700// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23701inline at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self) {
23702 return at::_ops::int_repr_out::call(self, out);
23703}
23704// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23705inline at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out) {
23706 return at::_ops::int_repr_out::call(self, out);
23707}
23708
23709// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
23710inline at::Tensor & _make_per_tensor_quantized_tensor_out(at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point) {
23711 return at::_ops::_make_per_tensor_quantized_tensor_out::call(self, scale, zero_point, out);
23712}
23713// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)
23714inline at::Tensor & _make_per_tensor_quantized_tensor_outf(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {
23715 return at::_ops::_make_per_tensor_quantized_tensor_out::call(self, scale, zero_point, out);
23716}
23717
23718// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
23719inline at::Tensor & _make_per_channel_quantized_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
23720 return at::_ops::_make_per_channel_quantized_tensor_out::call(self, scale, zero_point, axis, out);
23721}
23722// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)
23723inline at::Tensor & _make_per_channel_quantized_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {
23724 return at::_ops::_make_per_channel_quantized_tensor_out::call(self, scale, zero_point, axis, out);
23725}
23726
23727// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23728inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
23729 return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self, scale, zero_point, quant_min, quant_max, out0, out1);
23730}
23731// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23732inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_outf(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
23733 return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self, scale, zero_point, quant_min, quant_max, out0, out1);
23734}
23735
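// [Editorial note -- not emitted by torchgen] Multi-output wrappers return a
// std::tuple of references to the out arguments, so structured bindings work
// directly. A hedged sketch, assuming out1's dtype matches the boolean mask
// the kernel produces:
//
//   at::Tensor x = at::rand({3});
//   at::Tensor out0 = at::empty({0});
//   at::Tensor out1 = at::empty({0}, at::kBool);
//   auto [q, mask] = at::fake_quantize_per_tensor_affine_cachemask_out(
//       out0, out1, x, /*scale=*/0.1, /*zero_point=*/0,
//       /*quant_min=*/0, /*quant_max=*/255);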
23736// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23737inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
23738 return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
23739}
23740// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23741inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
23742 return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
23743}
23744
23745// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
23746inline at::Tensor & _fake_quantize_learnable_per_tensor_affine_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
23747 return at::_ops::_fake_quantize_learnable_per_tensor_affine_out::call(self, scale, zero_point, quant_min, quant_max, grad_factor, out);
23748}
23749// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
23750inline at::Tensor & _fake_quantize_learnable_per_tensor_affine_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
23751 return at::_ops::_fake_quantize_learnable_per_tensor_affine_out::call(self, scale, zero_point, quant_min, quant_max, grad_factor, out);
23752}
23753
23754// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23755inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
23756 return at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
23757}
23758// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23759inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
23760 return at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
23761}
23762
23763// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
23764inline at::Tensor & _fake_quantize_learnable_per_channel_affine_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
23765 return at::_ops::_fake_quantize_learnable_per_channel_affine_out::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
23766}
23767// aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
23768inline at::Tensor & _fake_quantize_learnable_per_channel_affine_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
23769 return at::_ops::_fake_quantize_learnable_per_channel_affine_out::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
23770}
23771
23772// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
23773inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) {
23774 return at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
23775}
23776// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
23777inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_outf(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {
23778 return at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
23779}
23780
23781// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
23782inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) {
23783 return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
23784}
23785
23786// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23787inline at::Tensor & _to_copy_out(at::Tensor & out, const at::Tensor & self, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
23788 return at::_ops::_to_copy_out::call(self, non_blocking, memory_format, out);
23789}
23790// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
23791inline at::Tensor & _to_copy_outf(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
23792 return at::_ops::_to_copy_out::call(self, non_blocking, memory_format, out);
23793}
23794
23795// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))
23796inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
23797 return at::_ops::_lstm_mps_out::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5);
23798}
23799// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!))
23800inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_outf(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5) {
23801 return at::_ops::_lstm_mps_out::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5);
23802}
23803
23804// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
23805inline void lstm_mps_backward_out(at::Tensor & out0, at::TensorList out1, at::TensorList out2, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
23806 return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
23807}
23808// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
23809inline void lstm_mps_backward_outf(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
23810 return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
23811}
23812
23813// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23814inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) {
23815 return at::_ops::_thnn_fused_lstm_cell_out::call(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
23816}
23817// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23818inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_outf(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
23819 return at::_ops::_thnn_fused_lstm_cell_out::call(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
23820}
23821
23822// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23823inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
23824 return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
23825}
23826// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
23827inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_outf(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
23828 return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
23829}
23830
23831// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23832inline ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) {
23833 return at::_ops::_thnn_fused_gru_cell_out::call(input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
23834}
23835// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23836inline ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_outf(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {
23837 return at::_ops::_thnn_fused_gru_cell_out::call(input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
23838}
23839
23840// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
23841inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
23842 return at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
23843}
23844// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
23845inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_outf(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
23846 return at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
23847}
23848
23849// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23850inline ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
23851 return at::_ops::_pack_padded_sequence_out::call(input, lengths, batch_first, out0, out1);
23852}
23853// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
23854inline ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_outf(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {
23855 return at::_ops::_pack_padded_sequence_out::call(input, lengths, batch_first, out0, out1);
23856}
23857
23858// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
23859inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source) {
23860 return at::_ops::set_source_Storage_out::call(self, source, out);
23861}
23862// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
23863inline at::Tensor & set_outf(const at::Tensor & self, at::Storage source, at::Tensor & out) {
23864 return at::_ops::set_source_Storage_out::call(self, source, out);
23865}
23866
23867// aten::set.source_Storage(Tensor self, Storage source) -> Tensor
23868inline at::Tensor set(const at::Tensor & self, at::Storage source) {
23869 return at::_ops::set_source_Storage::call(self, source);
23870}
23871
23872// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
23873inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
23874 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
23875}
23876namespace symint {
23877 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
23878 at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
23879 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
23880 }
23881}
23882
23883// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
23884inline at::Tensor & set_outf(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
23885 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
23886}
23887namespace symint {
23888 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
23889 at::Tensor & set_outf(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
23890 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
23891 }
23892}
23893
23894// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
23895inline at::Tensor & set_symint_out(at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
23896 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
23897}
23898namespace symint {
23899 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
23900 at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
23901 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
23902 }
23903}
23904
23905// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
23906inline at::Tensor & set_symint_outf(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
23907 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
23908}
23909namespace symint {
23910 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
23911 at::Tensor & set_outf(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
23912 return at::_ops::set_source_Storage_storage_offset_out::call(self, source, storage_offset, size, stride, out);
23913 }
23914}
23915
23916// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
23917inline at::Tensor set(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
23918 return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
23919}
23920namespace symint {
23921 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
23922 at::Tensor set(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
23923 return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
23924 }
23925}
23926
23927// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
23928inline at::Tensor set_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
23929 return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
23930}
23931namespace symint {
23932 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
23933 at::Tensor set(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
23934 return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
23935 }
23936}
23937
23938// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
23939inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & source) {
23940 return at::_ops::set_source_Tensor_out::call(self, source, out);
23941}
23942// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
23943inline at::Tensor & set_outf(const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
23944 return at::_ops::set_source_Tensor_out::call(self, source, out);
23945}
23946
23947// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor
23948inline at::Tensor set(const at::Tensor & self, const at::Tensor & source) {
23949 return at::_ops::set_source_Tensor::call(self, source);
23950}
23951
23952// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23953inline at::Tensor & set_out(at::Tensor & out, const at::Tensor & self) {
23954 return at::_ops::set_out::call(self, out);
23955}
23956// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23957inline at::Tensor & set_outf(const at::Tensor & self, at::Tensor & out) {
23958 return at::_ops::set_out::call(self, out);
23959}
23960
23961// aten::set(Tensor self) -> Tensor
23962inline at::Tensor set(const at::Tensor & self) {
23963 return at::_ops::set::call(self);
23964}
23965
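// NOTE: usage sketch (editorial addition, not part of the generated source).
// The overloads above follow the codegen naming convention: `*_out` takes `out`
// first and keeps the schema's default arguments, while `*_outf` takes `out`
// last with every argument spelled out. Assuming a source tensor `src`:
//
//   at::Tensor self = at::empty({0});
//   at::Tensor out = at::empty({0});
//   at::set_out(out, self, src.storage(), /*storage_offset=*/0, /*size=*/{2, 3});
//   at::set_outf(self, src.storage(), 0, {2, 3}, {3, 1}, out);  // same op, out last
//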
23966// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23967inline at::Tensor & lift_out(at::Tensor & out, const at::Tensor & self) {
23968 return at::_ops::lift_out::call(self, out);
23969}
23970// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23971inline at::Tensor & lift_outf(const at::Tensor & self, at::Tensor & out) {
23972 return at::_ops::lift_out::call(self, out);
23973}
23974
23975// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23976inline at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self) {
23977 return at::_ops::lift_fresh_copy_out::call(self, out);
23978}
23979// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
23980inline at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out) {
23981 return at::_ops::lift_fresh_copy_out::call(self, out);
23982}
23983
23984// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
23985inline at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
23986 return at::_ops::masked_fill_Scalar_out::call(self, mask, value, out);
23987}
23988// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
23989inline at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
23990 return at::_ops::masked_fill_Scalar_out::call(self, mask, value, out);
23991}
23992
23993// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
23994inline at::Tensor & masked_fill_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
23995 return at::_ops::masked_fill_Tensor_out::call(self, mask, value, out);
23996}
23997// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
23998inline at::Tensor & masked_fill_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
23999 return at::_ops::masked_fill_Tensor_out::call(self, mask, value, out);
24000}
24001
24002// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
24003inline at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
24004 return at::_ops::masked_scatter_out::call(self, mask, source, out);
24005}
24006// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
24007inline at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {
24008 return at::_ops::masked_scatter_out::call(self, mask, source, out);
24009}
24010
24011// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
24012inline at::Tensor & _masked_softmax_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt) {
24013 return at::_ops::_masked_softmax_out::call(self, mask, dim, mask_type, out);
24014}
24015// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
24016inline at::Tensor & _masked_softmax_outf(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) {
24017 return at::_ops::_masked_softmax_out::call(self, mask, dim, mask_type, out);
24018}
24019
24020// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
24021inline at::Tensor & _masked_softmax_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt) {
24022 return at::_ops::_masked_softmax_backward_out::call(grad_output, output, mask, dim, out);
24023}
24024// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
24025inline at::Tensor & _masked_softmax_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) {
24026 return at::_ops::_masked_softmax_backward_out::call(grad_output, output, mask, dim, out);
24027}
24028
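// NOTE: usage sketch (editorial addition). `masked_fill_out` writes `self` into a
// preallocated `out`, replacing elements where the boolean `mask` is true:
//
//   at::Tensor x = at::zeros({4});
//   at::Tensor mask = at::tensor({1, 0, 1, 0}).to(at::kBool);
//   at::Tensor out = at::empty_like(x);
//   at::masked_fill_out(out, x, mask, /*value=*/1.0);
//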
24029// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
24030inline at::Tensor & put_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) {
24031 return at::_ops::put_out::call(self, index, source, accumulate, out);
24032}
24033// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
24034inline at::Tensor & put_outf(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
24035 return at::_ops::put_out::call(self, index, source, accumulate, out);
24036}
24037
24038// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
24039inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
24040 return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out);
24041}
24042// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
24043inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
24044 return at::_ops::index_fill_int_Scalar_out::call(self, dim, index, value, out);
24045}
24046
24047// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
24048inline at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
24049 return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out);
24050}
24051// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
24052inline at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {
24053 return at::_ops::index_fill_int_Tensor_out::call(self, dim, index, value, out);
24054}
24055
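// NOTE: usage sketch (editorial addition). `index_fill_out` fills the slices of
// `self` selected by a Long `index` along `dim`, writing into `out`:
//
//   at::Tensor x = at::zeros({3, 4});
//   at::Tensor idx = at::tensor({0, 2});
//   at::Tensor out = at::empty_like(x);
//   at::index_fill_out(out, x, /*dim=*/0, idx, /*value=*/7.0);
//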
24056// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24057inline at::Tensor & bitwise_and_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
24058 return at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other, out);
24059}
24060// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24061inline at::Tensor & bitwise_and_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
24062 return at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other, out);
24063}
24064
24065// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24066inline at::Tensor & bitwise_or_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
24067 return at::_ops::bitwise_or_Scalar_Tensor_out::call(self, other, out);
24068}
24069// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24070inline at::Tensor & bitwise_or_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
24071 return at::_ops::bitwise_or_Scalar_Tensor_out::call(self, other, out);
24072}
24073
24074// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24075inline at::Tensor & bitwise_xor_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
24076 return at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other, out);
24077}
24078// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24079inline at::Tensor & bitwise_xor_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
24080 return at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other, out);
24081}
24082
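// NOTE: usage sketch (editorial addition). These `Scalar_Tensor` overloads place
// the scalar on the left-hand side of the binary op (e.g. `3 & other` elementwise);
// passing an explicit at::Scalar avoids ambiguity with the Tensor overloads:
//
//   at::Tensor other = at::tensor({1, 2, 3});
//   at::Tensor out = at::empty_like(other);
//   at::bitwise_and_out(out, at::Scalar(3), other);
//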
24083// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
24084inline at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
24085 return at::_ops::__lshift___Scalar_out::call(self, other, out);
24086}
24087// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
24088inline at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
24089 return at::_ops::__lshift___Scalar_out::call(self, other, out);
24090}
24091
24092// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24093inline at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
24094 return at::_ops::__lshift___Tensor_out::call(self, other, out);
24095}
24096// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24097inline at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
24098 return at::_ops::__lshift___Tensor_out::call(self, other, out);
24099}
24100
24101// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24102inline at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
24103 return at::_ops::bitwise_left_shift_Scalar_Tensor_out::call(self, other, out);
24104}
24105// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24106inline at::Tensor & bitwise_left_shift_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
24107 return at::_ops::bitwise_left_shift_Scalar_Tensor_out::call(self, other, out);
24108}
24109
24110// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
24111inline at::Tensor & __rshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
24112 return at::_ops::__rshift___Scalar_out::call(self, other, out);
24113}
24114// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
24115inline at::Tensor & __rshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
24116 return at::_ops::__rshift___Scalar_out::call(self, other, out);
24117}
24118
24119// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24120inline at::Tensor & __rshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
24121 return at::_ops::__rshift___Tensor_out::call(self, other, out);
24122}
24123// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24124inline at::Tensor & __rshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
24125 return at::_ops::__rshift___Tensor_out::call(self, other, out);
24126}
24127
24128// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24129inline at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
24130 return at::_ops::bitwise_right_shift_Scalar_Tensor_out::call(self, other, out);
24131}
24132// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24133inline at::Tensor & bitwise_right_shift_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
24134 return at::_ops::bitwise_right_shift_Scalar_Tensor_out::call(self, other, out);
24135}
24136
24137// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24138inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt) {
24139 return at::_ops::random_from_out::call(self, from, to, generator, out);
24140}
24141// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24142inline at::Tensor & random_outf(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) {
24143 return at::_ops::random_from_out::call(self, from, to, generator, out);
24144}
24145
24146// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
24147inline at::Tensor random(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt) {
24148 return at::_ops::random_from::call(self, from, to, generator);
24149}
24150
24151// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24152inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt) {
24153 return at::_ops::random_to_out::call(self, to, generator, out);
24154}
24155// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24156inline at::Tensor & random_outf(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) {
24157 return at::_ops::random_to_out::call(self, to, generator, out);
24158}
24159
24160// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
24161inline at::Tensor random(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt) {
24162 return at::_ops::random_to::call(self, to, generator);
24163}
24164
24165// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24166inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
24167 return at::_ops::random_out::call(self, generator, out);
24168}
24169// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24170inline at::Tensor & random_outf(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
24171 return at::_ops::random_out::call(self, generator, out);
24172}
24173
24174// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
24175inline at::Tensor random(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
24176 return at::_ops::random::call(self, generator);
24177}
24178
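// NOTE: usage sketch (editorial addition). The three `random` overloads mirror
// Tensor::random_(from, to), random_(to) and random_(); the forms here leave
// `self` untouched and write into `out` or return a new tensor:
//
//   at::Tensor x = at::empty({5});
//   at::Tensor out = at::empty({5});
//   at::random_out(out, x, /*from=*/0, /*to=*/10);  // integers in [0, 10)
//   at::Tensor y = at::random(x, /*to=*/10);        // functional form
//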
24179// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24180inline at::Tensor & uniform_out(at::Tensor & out, const at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt) {
24181 return at::_ops::uniform_out::call(self, from, to, generator, out);
24182}
24183// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24184inline at::Tensor & uniform_outf(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out) {
24185 return at::_ops::uniform_out::call(self, from, to, generator, out);
24186}
24187
24188// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
24189inline at::Tensor uniform(const at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt) {
24190 return at::_ops::uniform::call(self, from, to, generator);
24191}
24192
24193// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24194inline at::Tensor & cauchy_out(at::Tensor & out, const at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt) {
24195 return at::_ops::cauchy_out::call(self, median, sigma, generator, out);
24196}
24197// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24198inline at::Tensor & cauchy_outf(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) {
24199 return at::_ops::cauchy_out::call(self, median, sigma, generator, out);
24200}
24201
24202// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
24203inline at::Tensor cauchy(const at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt) {
24204 return at::_ops::cauchy::call(self, median, sigma, generator);
24205}
24206
24207// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24208inline at::Tensor & log_normal_out(at::Tensor & out, const at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt) {
24209 return at::_ops::log_normal_out::call(self, mean, std, generator, out);
24210}
24211// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24212inline at::Tensor & log_normal_outf(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
24213 return at::_ops::log_normal_out::call(self, mean, std, generator, out);
24214}
24215
24216// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
24217inline at::Tensor log_normal(const at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt) {
24218 return at::_ops::log_normal::call(self, mean, std, generator);
24219}
24220
24221// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24222inline at::Tensor & exponential_out(at::Tensor & out, const at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt) {
24223 return at::_ops::exponential_out::call(self, lambd, generator, out);
24224}
24225// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24226inline at::Tensor & exponential_outf(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) {
24227 return at::_ops::exponential_out::call(self, lambd, generator, out);
24228}
24229
24230// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
24231inline at::Tensor exponential(const at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt) {
24232 return at::_ops::exponential::call(self, lambd, generator);
24233}
24234
24235// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24236inline at::Tensor & geometric_out(at::Tensor & out, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
24237 return at::_ops::geometric_out::call(self, p, generator, out);
24238}
24239// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24240inline at::Tensor & geometric_outf(const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {
24241 return at::_ops::geometric_out::call(self, p, generator, out);
24242}
24243
24244// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
24245inline at::Tensor geometric(const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
24246 return at::_ops::geometric::call(self, p, generator);
24247}
24248
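// NOTE: usage sketch (editorial addition). The sampling ops above are the
// functional/out counterparts of the in-place Tensor methods (uniform_, cauchy_,
// log_normal_, exponential_, geometric_), all taking an optional Generator:
//
//   at::Tensor x = at::empty({3});
//   at::Tensor out = at::empty({3});
//   at::uniform_out(out, x, /*from=*/0.0, /*to=*/1.0);
//   at::Tensor e = at::exponential(x, /*lambd=*/2.0);
//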
24249// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
24250inline at::Tensor & tril_indices_out(at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) {
24251 return at::_ops::tril_indices_out::call(row, col, offset, out);
24252}
24253// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
24254inline at::Tensor & tril_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
24255 return at::_ops::tril_indices_out::call(row, col, offset, out);
24256}
24257
24258// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
24259inline at::Tensor & triu_indices_out(at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) {
24260 return at::_ops::triu_indices_out::call(row, col, offset, out);
24261}
24262// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
24263inline at::Tensor & triu_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
24264 return at::_ops::triu_indices_out::call(row, col, offset, out);
24265}
24266
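// NOTE: usage sketch (editorial addition). `tril_indices_out`/`triu_indices_out`
// write a 2 x N matrix of (row, col) coordinates into `out`, which should be an
// integer tensor (the functional forms default to kLong); `out` is resized:
//
//   at::Tensor out = at::empty({0}, at::kLong);
//   at::tril_indices_out(out, /*row=*/3, /*col=*/3);
//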
24267// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
24268inline at::Tensor & trace_out(at::Tensor & out, const at::Tensor & self) {
24269 return at::_ops::trace_out::call(self, out);
24270}
24271// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
24272inline at::Tensor & trace_outf(const at::Tensor & self, at::Tensor & out) {
24273 return at::_ops::trace_out::call(self, out);
24274}
24275
24276// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
24277inline at::Tensor & _cholesky_solve_helper_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & A, bool upper) {
24278 return at::_ops::_cholesky_solve_helper_out::call(self, A, upper, out);
24279}
24280// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
24281inline at::Tensor & _cholesky_solve_helper_outf(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
24282 return at::_ops::_cholesky_solve_helper_out::call(self, A, upper, out);
24283}
24284
24285// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
24286inline at::Tensor & dist_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p=2) {
24287 return at::_ops::dist_out::call(self, other, p, out);
24288}
24289// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
24290inline at::Tensor & dist_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
24291 return at::_ops::dist_out::call(self, other, p, out);
24292}
24293
24294// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
24295inline void _histogramdd_bin_edges_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
24296 return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
24297}
24298// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
24299inline void _histogramdd_bin_edges_outf(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
24300 return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
24301}
24302
24303// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
24304inline at::Tensor & _histogramdd_from_bin_cts_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
24305 return at::_ops::_histogramdd_from_bin_cts_out::call(self, bins, range, weight, density, out);
24306}
24307// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
24308inline at::Tensor & _histogramdd_from_bin_cts_outf(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
24309 return at::_ops::_histogramdd_from_bin_cts_out::call(self, bins, range, weight, density, out);
24310}
24311
24312// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
24313inline at::Tensor & _histogramdd_from_bin_tensors_out(at::Tensor & out, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
24314 return at::_ops::_histogramdd_from_bin_tensors_out::call(self, bins, weight, density, out);
24315}
24316// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
24317inline at::Tensor & _histogramdd_from_bin_tensors_outf(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
24318 return at::_ops::_histogramdd_from_bin_tensors_out::call(self, bins, weight, density, out);
24319}
24320
24321// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24322inline at::Tensor & remainder_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
24323 return at::_ops::remainder_Scalar_Tensor_out::call(self, other, out);
24324}
24325// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
24326inline at::Tensor & remainder_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
24327 return at::_ops::remainder_Scalar_Tensor_out::call(self, other, out);
24328}
24329
24330// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
24331inline at::Tensor & argsort_out(at::Tensor & out, const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false) {
24332 return at::_ops::argsort_stable_out::call(self, stable, dim, descending, out);
24333}
24334// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
24335inline at::Tensor & argsort_outf(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {
24336 return at::_ops::argsort_stable_out::call(self, stable, dim, descending, out);
24337}
24338
24339// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
24340inline at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
24341 return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
24342}
24343namespace symint {
24344 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
24345 at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
24346 return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
24347 }
24348}
24349
24350// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
24351inline at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
24352 return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
24353}
24354namespace symint {
24355 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
24356 at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
24357 return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
24358 }
24359}
24360
24361// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
24362inline at::Tensor & unfold_backward_symint_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
24363 return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
24364}
24365namespace symint {
24366 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
24367 at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
24368 return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
24369 }
24370}
24371
24372// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
24373inline at::Tensor & unfold_backward_symint_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
24374 return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
24375}
24376namespace symint {
24377 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
24378 at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
24379 return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
24380 }
24381}
24382
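// NOTE: usage sketch (editorial addition). The at::symint:: wrappers above let
// templated code select the plain-int or SymInt overload with a template argument
// instead of a distinct `_symint` name:
//
//   at::symint::unfold_backward_out<int64_t>(out, grad_in, {4, 6}, /*dim=*/1, /*size=*/2, /*step=*/1);
//   at::symint::unfold_backward_out<c10::SymInt>(out, grad_in, sym_sizes, 1, 2, 1);
//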
24383// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24384inline at::Tensor & normal_out(at::Tensor & out, const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) {
24385 return at::_ops::normal_out::call(self, mean, std, generator, out);
24386}
24387// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
24388inline at::Tensor & normal_outf(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
24389 return at::_ops::normal_out::call(self, mean, std, generator, out);
24390}
24391
24392// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
24393inline void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
24394 return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
24395}
24396// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
24397inline void _amp_foreach_non_finite_check_and_unscale_outf(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
24398 return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
24399}
24400
24401// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
24402inline ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
24403 return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
24404}
24405
24406// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
24407inline at::Tensor & _amp_update_scale_out(at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
24408 return at::_ops::_amp_update_scale_out::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
24409}
24410// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
24411inline at::Tensor & _amp_update_scale_outf(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
24412 return at::_ops::_amp_update_scale_out::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
24413}
24414
24415// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
24416inline ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
24417 return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
24418}
24419
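// NOTE: usage sketch (editorial addition). These are the non-mutating forms of the
// AMP gradient-scaling helpers; per the schema above, `_amp_update_scale` returns
// the updated scale first and the updated growth tracker second:
//
//   auto [new_scale, new_tracker] = at::_amp_update_scale(
//       scale, growth_tracker, found_inf,
//       /*scale_growth_factor=*/2.0, /*scale_backoff_factor=*/0.5,
//       /*growth_interval=*/2000);
//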
24420// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24421inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24422 return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
24423}
24424// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24425inline void _foreach_add_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24426 return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
24427}
24428
24429// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24430inline void _foreach_sub_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24431 return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
24432}
24433// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24434inline void _foreach_sub_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24435 return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
24436}
24437
24438// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24439inline void _foreach_mul_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24440 return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
24441}
24442// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24443inline void _foreach_mul_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24444 return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
24445}
24446
24447// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24448inline void _foreach_div_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24449 return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
24450}
24451// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24452inline void _foreach_div_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24453 return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
24454}
24455
24456// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24457inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24458 return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
24459}
24460// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24461inline void _foreach_clamp_min_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24462 return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
24463}
24464
24465// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24466inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24467 return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
24468}
24469// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24470inline void _foreach_clamp_max_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24471 return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
24472}
24473
24474// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24475inline void _foreach_maximum_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24476 return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
24477}
24478// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24479inline void _foreach_maximum_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24480 return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
24481}
24482
24483// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24484inline void _foreach_minimum_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
24485 return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out);
24486}
24487// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
24488inline void _foreach_minimum_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
24489 return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out);
24490}
24491
24492// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
24493inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
24494 return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
24495}
24496// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
24497inline void _foreach_add_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
24498 return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
24499}
24500
24501// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
24502inline void _foreach_sub_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
24503 return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
24504}
24505// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
24506inline void _foreach_sub_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
24507 return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
24508}
24509
24510// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24511inline void _foreach_mul_out(at::TensorList out, at::TensorList self, at::TensorList other) {
24512 return at::_ops::_foreach_mul_List_out::call(self, other, out);
24513}
24514// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24515inline void _foreach_mul_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
24516 return at::_ops::_foreach_mul_List_out::call(self, other, out);
24517}
24518
24519// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24520inline void _foreach_div_out(at::TensorList out, at::TensorList self, at::TensorList other) {
24521 return at::_ops::_foreach_div_List_out::call(self, other, out);
24522}
24523// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24524inline void _foreach_div_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
24525 return at::_ops::_foreach_div_List_out::call(self, other, out);
24526}
24527
24528// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24529inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::TensorList other) {
24530 return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
24531}
24532// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24533inline void _foreach_clamp_min_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
24534 return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
24535}
24536
24537// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24538inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::TensorList other) {
24539 return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
24540}
24541// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24542inline void _foreach_clamp_max_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
24543 return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
24544}
24545
24546// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24547inline void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::TensorList other) {
24548 return at::_ops::_foreach_maximum_List_out::call(self, other, out);
24549}
24550// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24551inline void _foreach_maximum_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
24552 return at::_ops::_foreach_maximum_List_out::call(self, other, out);
24553}
24554
24555// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24556inline void _foreach_minimum_out(at::TensorList out, at::TensorList self, at::TensorList other) {
24557 return at::_ops::_foreach_minimum_List_out::call(self, other, out);
24558}
24559// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
24560inline void _foreach_minimum_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
24561 return at::_ops::_foreach_minimum_List_out::call(self, other, out);
24562}
24563
24564// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24565inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24566 return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
24567}
24568// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24569inline void _foreach_add_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24570 return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
24571}
24572
24573// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24574inline void _foreach_sub_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24575 return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
24576}
24577// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24578inline void _foreach_sub_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24579 return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
24580}
24581
24582// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24583inline void _foreach_div_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24584 return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
24585}
24586// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24587inline void _foreach_div_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24588 return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
24589}
24590
24591// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24592inline void _foreach_mul_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24593 return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
24594}
24595// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24596inline void _foreach_mul_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24597 return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
24598}
24599
24600// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24601inline void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24602 return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
24603}
24604// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24605inline void _foreach_clamp_min_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24606 return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
24607}
24608
24609// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24610inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24611 return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
24612}
24613// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24614inline void _foreach_clamp_max_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24615 return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
24616}
24617
24618// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24619inline void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24620 return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
24621}
24622// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24623inline void _foreach_maximum_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24624 return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
24625}
24626
24627// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24628inline void _foreach_minimum_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
24629 return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out);
24630}
24631// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24632inline void _foreach_minimum_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24633 return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out);
24634}
24635
24636// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24637inline void _foreach_exp_out(at::TensorList out, at::TensorList self) {
24638 return at::_ops::_foreach_exp_out::call(self, out);
24639}
24640// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24641inline void _foreach_exp_outf(at::TensorList self, at::TensorList out) {
24642 return at::_ops::_foreach_exp_out::call(self, out);
24643}
24644
24645// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24646inline void _foreach_zero_out(at::TensorList out, at::TensorList self) {
24647 return at::_ops::_foreach_zero_out::call(self, out);
24648}
24649// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24650inline void _foreach_zero_outf(at::TensorList self, at::TensorList out) {
24651 return at::_ops::_foreach_zero_out::call(self, out);
24652}
24653
24654// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
24655inline ::std::vector<at::Tensor> _foreach_zero(at::TensorList self) {
24656 return at::_ops::_foreach_zero::call(self);
24657}
24658
24659// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24660inline void _foreach_sqrt_out(at::TensorList out, at::TensorList self) {
24661 return at::_ops::_foreach_sqrt_out::call(self, out);
24662}
24663// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24664inline void _foreach_sqrt_outf(at::TensorList self, at::TensorList out) {
24665 return at::_ops::_foreach_sqrt_out::call(self, out);
24666}
24667
24668// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24669inline void _foreach_abs_out(at::TensorList out, at::TensorList self) {
24670 return at::_ops::_foreach_abs_out::call(self, out);
24671}
24672// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24673inline void _foreach_abs_outf(at::TensorList self, at::TensorList out) {
24674 return at::_ops::_foreach_abs_out::call(self, out);
24675}
24676
24677// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24678inline void _foreach_acos_out(at::TensorList out, at::TensorList self) {
24679 return at::_ops::_foreach_acos_out::call(self, out);
24680}
24681// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24682inline void _foreach_acos_outf(at::TensorList self, at::TensorList out) {
24683 return at::_ops::_foreach_acos_out::call(self, out);
24684}
24685
24686// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24687inline void _foreach_asin_out(at::TensorList out, at::TensorList self) {
24688 return at::_ops::_foreach_asin_out::call(self, out);
24689}
24690// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24691inline void _foreach_asin_outf(at::TensorList self, at::TensorList out) {
24692 return at::_ops::_foreach_asin_out::call(self, out);
24693}
24694
24695// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24696inline void _foreach_atan_out(at::TensorList out, at::TensorList self) {
24697 return at::_ops::_foreach_atan_out::call(self, out);
24698}
24699// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24700inline void _foreach_atan_outf(at::TensorList self, at::TensorList out) {
24701 return at::_ops::_foreach_atan_out::call(self, out);
24702}
24703
24704// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24705inline void _foreach_ceil_out(at::TensorList out, at::TensorList self) {
24706 return at::_ops::_foreach_ceil_out::call(self, out);
24707}
24708// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24709inline void _foreach_ceil_outf(at::TensorList self, at::TensorList out) {
24710 return at::_ops::_foreach_ceil_out::call(self, out);
24711}
24712
24713// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24714inline void _foreach_cos_out(at::TensorList out, at::TensorList self) {
24715 return at::_ops::_foreach_cos_out::call(self, out);
24716}
24717// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24718inline void _foreach_cos_outf(at::TensorList self, at::TensorList out) {
24719 return at::_ops::_foreach_cos_out::call(self, out);
24720}
24721
24722// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24723inline void _foreach_cosh_out(at::TensorList out, at::TensorList self) {
24724 return at::_ops::_foreach_cosh_out::call(self, out);
24725}
24726// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24727inline void _foreach_cosh_outf(at::TensorList self, at::TensorList out) {
24728 return at::_ops::_foreach_cosh_out::call(self, out);
24729}
24730
24731// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24732inline void _foreach_erf_out(at::TensorList out, at::TensorList self) {
24733 return at::_ops::_foreach_erf_out::call(self, out);
24734}
24735// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24736inline void _foreach_erf_outf(at::TensorList self, at::TensorList out) {
24737 return at::_ops::_foreach_erf_out::call(self, out);
24738}
24739
24740// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24741inline void _foreach_erfc_out(at::TensorList out, at::TensorList self) {
24742 return at::_ops::_foreach_erfc_out::call(self, out);
24743}
24744// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24745inline void _foreach_erfc_outf(at::TensorList self, at::TensorList out) {
24746 return at::_ops::_foreach_erfc_out::call(self, out);
24747}
24748
24749// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24750inline void _foreach_expm1_out(at::TensorList out, at::TensorList self) {
24751 return at::_ops::_foreach_expm1_out::call(self, out);
24752}
24753// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24754inline void _foreach_expm1_outf(at::TensorList self, at::TensorList out) {
24755 return at::_ops::_foreach_expm1_out::call(self, out);
24756}
24757
24758// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24759inline void _foreach_floor_out(at::TensorList out, at::TensorList self) {
24760 return at::_ops::_foreach_floor_out::call(self, out);
24761}
24762// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24763inline void _foreach_floor_outf(at::TensorList self, at::TensorList out) {
24764 return at::_ops::_foreach_floor_out::call(self, out);
24765}
24766
24767// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24768inline void _foreach_log_out(at::TensorList out, at::TensorList self) {
24769 return at::_ops::_foreach_log_out::call(self, out);
24770}
24771// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24772inline void _foreach_log_outf(at::TensorList self, at::TensorList out) {
24773 return at::_ops::_foreach_log_out::call(self, out);
24774}
24775
24776// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24777inline void _foreach_log10_out(at::TensorList out, at::TensorList self) {
24778 return at::_ops::_foreach_log10_out::call(self, out);
24779}
24780// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24781inline void _foreach_log10_outf(at::TensorList self, at::TensorList out) {
24782 return at::_ops::_foreach_log10_out::call(self, out);
24783}
24784
24785// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24786inline void _foreach_log1p_out(at::TensorList out, at::TensorList self) {
24787 return at::_ops::_foreach_log1p_out::call(self, out);
24788}
24789// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24790inline void _foreach_log1p_outf(at::TensorList self, at::TensorList out) {
24791 return at::_ops::_foreach_log1p_out::call(self, out);
24792}
24793
24794// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24795inline void _foreach_log2_out(at::TensorList out, at::TensorList self) {
24796 return at::_ops::_foreach_log2_out::call(self, out);
24797}
24798// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24799inline void _foreach_log2_outf(at::TensorList self, at::TensorList out) {
24800 return at::_ops::_foreach_log2_out::call(self, out);
24801}
24802
24803// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24804inline void _foreach_neg_out(at::TensorList out, at::TensorList self) {
24805 return at::_ops::_foreach_neg_out::call(self, out);
24806}
24807// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24808inline void _foreach_neg_outf(at::TensorList self, at::TensorList out) {
24809 return at::_ops::_foreach_neg_out::call(self, out);
24810}
24811
24812// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24813inline void _foreach_tan_out(at::TensorList out, at::TensorList self) {
24814 return at::_ops::_foreach_tan_out::call(self, out);
24815}
24816// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24817inline void _foreach_tan_outf(at::TensorList self, at::TensorList out) {
24818 return at::_ops::_foreach_tan_out::call(self, out);
24819}
24820
24821// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24822inline void _foreach_tanh_out(at::TensorList out, at::TensorList self) {
24823 return at::_ops::_foreach_tanh_out::call(self, out);
24824}
24825// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24826inline void _foreach_tanh_outf(at::TensorList self, at::TensorList out) {
24827 return at::_ops::_foreach_tanh_out::call(self, out);
24828}
24829
24830// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24831inline void _foreach_sin_out(at::TensorList out, at::TensorList self) {
24832 return at::_ops::_foreach_sin_out::call(self, out);
24833}
24834// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24835inline void _foreach_sin_outf(at::TensorList self, at::TensorList out) {
24836 return at::_ops::_foreach_sin_out::call(self, out);
24837}
24838
24839// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24840inline void _foreach_sinh_out(at::TensorList out, at::TensorList self) {
24841 return at::_ops::_foreach_sinh_out::call(self, out);
24842}
24843// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24844inline void _foreach_sinh_outf(at::TensorList self, at::TensorList out) {
24845 return at::_ops::_foreach_sinh_out::call(self, out);
24846}
24847
24848// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24849inline void _foreach_round_out(at::TensorList out, at::TensorList self) {
24850 return at::_ops::_foreach_round_out::call(self, out);
24851}
24852// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24853inline void _foreach_round_outf(at::TensorList self, at::TensorList out) {
24854 return at::_ops::_foreach_round_out::call(self, out);
24855}
24856
24857// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24858inline void _foreach_lgamma_out(at::TensorList out, at::TensorList self) {
24859 return at::_ops::_foreach_lgamma_out::call(self, out);
24860}
24861// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24862inline void _foreach_lgamma_outf(at::TensorList self, at::TensorList out) {
24863 return at::_ops::_foreach_lgamma_out::call(self, out);
24864}
24865
24866// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24867inline void _foreach_frac_out(at::TensorList out, at::TensorList self) {
24868 return at::_ops::_foreach_frac_out::call(self, out);
24869}
24870// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24871inline void _foreach_frac_outf(at::TensorList self, at::TensorList out) {
24872 return at::_ops::_foreach_frac_out::call(self, out);
24873}
24874
24875// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24876inline void _foreach_reciprocal_out(at::TensorList out, at::TensorList self) {
24877 return at::_ops::_foreach_reciprocal_out::call(self, out);
24878}
24879// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24880inline void _foreach_reciprocal_outf(at::TensorList self, at::TensorList out) {
24881 return at::_ops::_foreach_reciprocal_out::call(self, out);
24882}
24883
24884// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24885inline void _foreach_sigmoid_out(at::TensorList out, at::TensorList self) {
24886 return at::_ops::_foreach_sigmoid_out::call(self, out);
24887}
24888// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24889inline void _foreach_sigmoid_outf(at::TensorList self, at::TensorList out) {
24890 return at::_ops::_foreach_sigmoid_out::call(self, out);
24891}
24892
24893// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24894inline void _foreach_trunc_out(at::TensorList out, at::TensorList self) {
24895 return at::_ops::_foreach_trunc_out::call(self, out);
24896}
24897// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
24898inline void _foreach_trunc_outf(at::TensorList self, at::TensorList out) {
24899 return at::_ops::_foreach_trunc_out::call(self, out);
24900}
24901
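// NOTE: usage sketch (editorial addition). The unary `_foreach_*` ops follow the
// same list-in/list-out pattern; the functional `_foreach_zero` above returns a
// fresh list and leaves its inputs unchanged:
//
//   std::vector<at::Tensor> xs = {at::rand({2}), at::rand({3})};
//   std::vector<at::Tensor> outs = {at::empty({2}), at::empty({3})};
//   at::_foreach_sqrt_out(outs, xs);
//   ::std::vector<at::Tensor> zs = at::_foreach_zero(xs);
//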
24902// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
24903inline void _foreach_addcdiv_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
24904 return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out);
24905}
24906// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
24907inline void _foreach_addcdiv_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
24908 return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out);
24909}
24910
24911// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
24912inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
24913 return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
24914}
24915// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
24916inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
24917 return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
24918}
24919
24920// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24921inline void _foreach_addcdiv_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
24922 return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
24923}
24924// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24925inline void _foreach_addcdiv_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24926 return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
24927}
24928
24929// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
24930inline void _foreach_addcdiv_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
24931 return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out);
24932}
24933// aten::_foreach_addcdiv.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
24934inline void _foreach_addcdiv_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
24935 return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out);
24936}
24937
24938// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24939inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
24940 return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
24941}
24942// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
24943inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
24944 return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
24945}
24946
24947// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
24948inline void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
24949 return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
24950}
24951// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
24952inline void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
24953 return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
24954}
24955
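// NOTE: the `_foreach_addcdiv`/`_foreach_addcmul` out-overloads above differ
// only in how the scaling factor arrives: one Scalar for every slot, one
// Scalar per tensor, or a 1-D Tensor of scalars. A minimal sketch, assuming
// `outs`, `xs`, `t1s`, `t2s` are std::vector<at::Tensor> lists of equal
// length with matching shapes:
//
//   at::_foreach_addcdiv_out(outs, xs, t1s, t2s, /*value=*/2.0);  // Scalar overload
//   std::vector<at::Scalar> per_tensor = {2.0, 0.5};
//   at::_foreach_addcdiv_out(outs, xs, t1s, t2s,
//                            at::ArrayRef<at::Scalar>(per_tensor));  // ScalarList overload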
24956// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()
24957inline void _foreach_norm_out(at::TensorList out, at::TensorList self, const at::Scalar & ord=2) {
24958 return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
24959}
24960// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()
24961inline void _foreach_norm_outf(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
24962 return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
24963}
24964
24965// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
24966inline void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
24967 return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
24968}
24969// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
24970inline void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
24971 return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
24972}
24973
24974// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
24975inline void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
24976 return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
24977}
24978// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
24979inline void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
24980 return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
24981}
24982
24983// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
24984inline at::Tensor & bucketize_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
24985 return at::_ops::bucketize_Scalar_out::call(self, boundaries, out_int32, right, out);
24986}
24987// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
24988inline at::Tensor & bucketize_outf(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
24989 return at::_ops::bucketize_Scalar_out::call(self, boundaries, out_int32, right, out);
24990}
24991
24992// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
24993inline at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
24994 return at::_ops::searchsorted_Scalar_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
24995}
24996// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
24997inline at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
24998 return at::_ops::searchsorted_Scalar_out::call(sorted_sequence, self, out_int32, right, side, sorter, out);
24999}
25000
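// NOTE: the Scalar overloads of bucketize/searchsorted above look up a single
// needle value rather than a tensor of them; with out_int32=false the result
// dtype is int64. A minimal sketch (values are illustrative):
//
//   at::Tensor seq = at::arange(10, at::kFloat);  // sorted sequence 0..9
//   at::Tensor idx = at::empty({}, at::kLong);
//   at::searchsorted_out(idx, seq, 3.5);          // left insertion point -> 4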
25001// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
25002inline at::Tensor & glu_jvp_out(at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
25003 return at::_ops::glu_jvp_out::call(glu, x, dx, dim, out);
25004}
25005// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
25006inline at::Tensor & glu_jvp_outf(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
25007 return at::_ops::glu_jvp_out::call(glu, x, dx, dim, out);
25008}
25009
25010// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
25011inline at::Tensor & glu_backward_jvp_out(at::Tensor & out, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
25012 return at::_ops::glu_backward_jvp_out::call(grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
25013}
25014// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
25015inline at::Tensor & glu_backward_jvp_outf(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
25016 return at::_ops::glu_backward_jvp_out::call(grad_x, grad_glu, x, dgrad_glu, dx, dim, out);
25017}
25018
25019// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25020inline at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
25021 return at::_ops::hardswish_backward_out::call(grad_output, self, out);
25022}
25023// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25024inline at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
25025 return at::_ops::hardswish_backward_out::call(grad_output, self, out);
25026}
25027
25028// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
25029inline at::Tensor & rrelu_with_noise_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
25030 return at::_ops::rrelu_with_noise_backward_out::call(grad_output, self, noise, lower, upper, training, self_is_result, out);
25031}
25032// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)
25033inline at::Tensor & rrelu_with_noise_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {
25034 return at::_ops::rrelu_with_noise_backward_out::call(grad_output, self, noise, lower, upper, training, self_is_result, out);
25035}
25036
25037// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25038inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
25039 return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output, self, out);
25040}
25041// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25042inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
25043 return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output, self, out);
25044}
25045
25046// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
25047inline at::Tensor & _adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
25048 return at::_ops::_adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25049}
25050namespace symint {
25051 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25052 at::Tensor & _adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
25053 return at::_ops::_adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25054 }
25055}
25056
25057// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
25058inline at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
25059 return at::_ops::_adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25060}
25061namespace symint {
25062 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25063 at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
25064 return at::_ops::_adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25065 }
25066}
25067
25068// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
25069inline at::Tensor & _adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
25070 return at::_ops::_adaptive_avg_pool2d_out::call(self, output_size, out);
25071}
25072namespace symint {
25073 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25074 at::Tensor & _adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
25075 return at::_ops::_adaptive_avg_pool2d_out::call(self, output_size, out);
25076 }
25077}
25078
25079// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
25080inline at::Tensor & _adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
25081 return at::_ops::_adaptive_avg_pool2d_out::call(self, output_size, out);
25082}
25083namespace symint {
25084 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25085 at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
25086 return at::_ops::_adaptive_avg_pool2d_out::call(self, output_size, out);
25087 }
25088}
25089
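// NOTE: the at::symint:: wrappers above let generic code choose between the
// int64_t and c10::SymInt flavours of one logical signature via a template
// argument; the enable_if guard rejects any other T. A minimal sketch:
//
//   at::Tensor x = at::rand({1, 3, 8, 8});
//   at::Tensor out = at::empty({1, 3, 7, 7});
//   at::symint::_adaptive_avg_pool2d_out<int64_t>(out, x, {7, 7});    // plain-int path
//   std::vector<c10::SymInt> sz = {c10::SymInt(7), c10::SymInt(7)};
//   at::symint::_adaptive_avg_pool2d_out<c10::SymInt>(out, x, sz);    // symbolic path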
25090// aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25091inline at::Tensor & _adaptive_avg_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
25092 return at::_ops::_adaptive_avg_pool2d_backward_out::call(grad_output, self, out);
25093}
25094// aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25095inline at::Tensor & _adaptive_avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
25096 return at::_ops::_adaptive_avg_pool2d_backward_out::call(grad_output, self, out);
25097}
25098
25099// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
25100inline at::Tensor & _adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
25101 return at::_ops::_adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25102}
25103namespace symint {
25104 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25105 at::Tensor & _adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) {
25106 return at::_ops::_adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25107 }
25108}
25109
25110// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
25111inline at::Tensor & _adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
25112 return at::_ops::_adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25113}
25114namespace symint {
25115 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25116 at::Tensor & _adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
25117 return at::_ops::_adaptive_avg_pool3d_out::call(self, c10::fromIntArrayRefSlow(output_size), out);
25118 }
25119}
25120
25121// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
25122inline at::Tensor & _adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
25123 return at::_ops::_adaptive_avg_pool3d_out::call(self, output_size, out);
25124}
25125namespace symint {
25126 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25127 at::Tensor & _adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) {
25128 return at::_ops::_adaptive_avg_pool3d_out::call(self, output_size, out);
25129 }
25130}
25131
25132// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
25133inline at::Tensor & _adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
25134 return at::_ops::_adaptive_avg_pool3d_out::call(self, output_size, out);
25135}
25136namespace symint {
25137 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25138 at::Tensor & _adaptive_avg_pool3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
25139 return at::_ops::_adaptive_avg_pool3d_out::call(self, output_size, out);
25140 }
25141}
25142
25143// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25144inline at::Tensor & _adaptive_avg_pool3d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
25145 return at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output, self, out);
25146}
25147// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25148inline at::Tensor & _adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
25149 return at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output, self, out);
25150}
25151
25152// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
25153inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
25154 return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
25155}
25156// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
25157inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
25158 return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
25159}
25160
25161// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
25162inline at::Tensor & conv_depthwise3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
25163 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25164}
25165namespace symint {
25166 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25167 at::Tensor & conv_depthwise3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
25168 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25169 }
25170}
25171
25172// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
25173inline at::Tensor & conv_depthwise3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25174 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25175}
25176namespace symint {
25177 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25178 at::Tensor & conv_depthwise3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25179 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25180 }
25181}
25182
25183// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
25184inline at::Tensor & conv_depthwise3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
25185 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25186}
25187namespace symint {
25188 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25189 at::Tensor & conv_depthwise3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
25190 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25191 }
25192}
25193
25194// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
25195inline at::Tensor & conv_depthwise3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25196 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25197}
25198namespace symint {
25199 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25200 at::Tensor & conv_depthwise3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25201 return at::_ops::conv_depthwise3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25202 }
25203}
25204
25205// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25206inline at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
25207 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25208}
25209namespace symint {
25210 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25211 at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
25212 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25213 }
25214}
25215
25216// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25217inline at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25218 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25219}
25220namespace symint {
25221 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25222 at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25223 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25224 }
25225}
25226
25227// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25228inline at::Tensor & slow_conv_dilated2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
25229 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25230}
25231namespace symint {
25232 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25233 at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
25234 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25235 }
25236}
25237
25238// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25239inline at::Tensor & slow_conv_dilated2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25240 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25241}
25242namespace symint {
25243 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25244 at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25245 return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25246 }
25247}
25248
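// NOTE: the plain-int overloads above lift `padding` to symbolic ints with
// c10::fromIntArrayRefSlow before dispatch, so both spellings reach the same
// SymInt-typed schema. A minimal sketch, assuming `x` is an (N, C, H, W)
// input and `w` a compatible (C_out, C_in, 3, 3) weight:
//
//   at::Tensor y = at::empty({0});                        // resized by the out-kernel
//   at::slow_conv_dilated2d_out(y, x, w, {3, 3});         // IntArrayRef padding, lifted internally
//   at::slow_conv_dilated2d_symint_out(y, x, w, {3, 3});  // SymIntArrayRef padding, passed through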
25249// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25250inline at::Tensor & slow_conv_dilated3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
25251 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25252}
25253namespace symint {
25254 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25255 at::Tensor & slow_conv_dilated3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
25256 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25257 }
25258}
25259
25260// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25261inline at::Tensor & slow_conv_dilated3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25262 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25263}
25264namespace symint {
25265 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25266 at::Tensor & slow_conv_dilated3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25267 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, c10::fromIntArrayRefSlow(padding), dilation, out);
25268 }
25269}
25270
25271// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25272inline at::Tensor & slow_conv_dilated3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
25273 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25274}
25275namespace symint {
25276 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25277 at::Tensor & slow_conv_dilated3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1) {
25278 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25279 }
25280}
25281
25282// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
25283inline at::Tensor & slow_conv_dilated3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25284 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25285}
25286namespace symint {
25287 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25288 at::Tensor & slow_conv_dilated3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
25289 return at::_ops::slow_conv_dilated3d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out);
25290 }
25291}
25292
25293// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25294inline at::Tensor & isinf_out(at::Tensor & out, const at::Tensor & self) {
25295 return at::_ops::isinf_out::call(self, out);
25296}
25297// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25298inline at::Tensor & isinf_outf(const at::Tensor & self, at::Tensor & out) {
25299 return at::_ops::isinf_out::call(self, out);
25300}
25301
25302// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25303inline at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self) {
25304 return at::_ops::linalg_matrix_exp_out::call(self, out);
25305}
25306// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25307inline at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out) {
25308 return at::_ops::linalg_matrix_exp_out::call(self, out);
25309}
25310
25311// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
25312inline at::Tensor & _test_optional_intlist_out(at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
25313 return at::_ops::_test_optional_intlist_out::call(values, addends, out);
25314}
25315// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
25316inline at::Tensor & _test_optional_intlist_outf(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
25317 return at::_ops::_test_optional_intlist_out::call(values, addends, out);
25318}
25319
25320// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
25321inline at::Tensor & _test_optional_filled_intlist_out(at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
25322 return at::_ops::_test_optional_filled_intlist_out::call(values, addends, out);
25323}
25324// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
25325inline at::Tensor & _test_optional_filled_intlist_outf(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
25326 return at::_ops::_test_optional_filled_intlist_out::call(values, addends, out);
25327}
25328
25329// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)
25330inline at::Tensor & _test_optional_floatlist_out(at::Tensor & out, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
25331 return at::_ops::_test_optional_floatlist_out::call(values, addends, out);
25332}
25333// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)
25334inline at::Tensor & _test_optional_floatlist_outf(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
25335 return at::_ops::_test_optional_floatlist_out::call(values, addends, out);
25336}
25337
25338// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25339inline at::Tensor & _test_warn_in_autograd_out(at::Tensor & out, const at::Tensor & self) {
25340 return at::_ops::_test_warn_in_autograd_out::call(self, out);
25341}
25342// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25343inline at::Tensor & _test_warn_in_autograd_outf(const at::Tensor & self, at::Tensor & out) {
25344 return at::_ops::_test_warn_in_autograd_out::call(self, out);
25345}
25346
25347// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25348inline at::Tensor & _test_autograd_multiple_dispatch_out(at::Tensor & out, const at::Tensor & self) {
25349 return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self, out);
25350}
25351// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25352inline at::Tensor & _test_autograd_multiple_dispatch_outf(const at::Tensor & self, at::Tensor & out) {
25353 return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self, out);
25354}
25355
25356// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25357inline at::Tensor & _test_autograd_multiple_dispatch_view_copy_out(at::Tensor & out, const at::Tensor & self) {
25358 return at::_ops::_test_autograd_multiple_dispatch_view_copy_out::call(self, out);
25359}
25360// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25361inline at::Tensor & _test_autograd_multiple_dispatch_view_copy_outf(const at::Tensor & self, at::Tensor & out) {
25362 return at::_ops::_test_autograd_multiple_dispatch_view_copy_out::call(self, out);
25363}
25364
25365// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
25366inline at::Tensor & segment_reduce_out(at::Tensor & out, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & indices={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional<at::Scalar> & initial=c10::nullopt) {
25367 return at::_ops::segment_reduce_out::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
25368}
25369// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
25370inline at::Tensor & segment_reduce_outf(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
25371 return at::_ops::segment_reduce_out::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
25372}
25373
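// NOTE: a minimal sketch of the segment_reduce out-variant above, summing a
// flat tensor over two segments of lengths 2 and 4 (one output value per
// segment; names are illustrative):
//
//   at::Tensor data = at::rand({6});
//   at::Tensor lengths = at::tensor(at::ArrayRef<int64_t>({2, 4}));
//   at::Tensor out = at::empty({2});
//   at::segment_reduce_out(out, data, "sum", lengths);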
25374// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
25375inline at::Tensor & _segment_reduce_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, const c10::optional<at::Scalar> & initial=c10::nullopt) {
25376 return at::_ops::_segment_reduce_backward_out::call(grad, output, data, reduce, lengths, offsets, axis, initial, out);
25377}
25378// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
25379inline at::Tensor & _segment_reduce_backward_outf(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
25380 return at::_ops::_segment_reduce_backward_out::call(grad, output, data, reduce, lengths, offsets, axis, initial, out);
25381}
25382
25383// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
25384inline at::Tensor & _nested_tensor_from_tensor_list_out(at::Tensor & out, at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt) {
25385 return at::_ops::_nested_tensor_from_tensor_list_out::call(list, dtype, layout, device, pin_memory, out);
25386}
25387// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
25388inline at::Tensor & _nested_tensor_from_tensor_list_outf(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out) {
25389 return at::_ops::_nested_tensor_from_tensor_list_out::call(list, dtype, layout, device, pin_memory, out);
25390}
25391
25392// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
25393inline at::Tensor & _fw_primal_copy_out(at::Tensor & out, const at::Tensor & self, int64_t level) {
25394 return at::_ops::_fw_primal_copy_out::call(self, level, out);
25395}
25396// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
25397inline at::Tensor & _fw_primal_copy_outf(const at::Tensor & self, int64_t level, at::Tensor & out) {
25398 return at::_ops::_fw_primal_copy_out::call(self, level, out);
25399}
25400
25401// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
25402inline at::Tensor & _make_dual_copy_out(at::Tensor & out, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
25403 return at::_ops::_make_dual_copy_out::call(primal, tangent, level, out);
25404}
25405// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
25406inline at::Tensor & _make_dual_copy_outf(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
25407 return at::_ops::_make_dual_copy_out::call(primal, tangent, level, out);
25408}
25409
25410// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25411inline at::Tensor & view_as_real_copy_out(at::Tensor & out, const at::Tensor & self) {
25412 return at::_ops::view_as_real_copy_out::call(self, out);
25413}
25414// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25415inline at::Tensor & view_as_real_copy_outf(const at::Tensor & self, at::Tensor & out) {
25416 return at::_ops::view_as_real_copy_out::call(self, out);
25417}
25418
25419// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25420inline at::Tensor & view_as_complex_copy_out(at::Tensor & out, const at::Tensor & self) {
25421 return at::_ops::view_as_complex_copy_out::call(self, out);
25422}
25423// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25424inline at::Tensor & view_as_complex_copy_outf(const at::Tensor & self, at::Tensor & out) {
25425 return at::_ops::view_as_complex_copy_out::call(self, out);
25426}
25427
25428// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25429inline at::Tensor & _conj_copy_out(at::Tensor & out, const at::Tensor & self) {
25430 return at::_ops::_conj_copy_out::call(self, out);
25431}
25432// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25433inline at::Tensor & _conj_copy_outf(const at::Tensor & self, at::Tensor & out) {
25434 return at::_ops::_conj_copy_out::call(self, out);
25435}
25436
25437// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25438inline at::Tensor & _neg_view_copy_out(at::Tensor & out, const at::Tensor & self) {
25439 return at::_ops::_neg_view_copy_out::call(self, out);
25440}
25441// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25442inline at::Tensor & _neg_view_copy_outf(const at::Tensor & self, at::Tensor & out) {
25443 return at::_ops::_neg_view_copy_out::call(self, out);
25444}
25445
25446// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
25447inline at::Tensor & as_strided_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
25448 return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
25449}
25450namespace symint {
25451 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25452 at::Tensor & as_strided_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
25453 return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
25454 }
25455}
25456
25457// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
25458inline at::Tensor & as_strided_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
25459 return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
25460}
25461namespace symint {
25462 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25463 at::Tensor & as_strided_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
25464 return at::_ops::as_strided_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
25465 }
25466}
25467
25468// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
25469inline at::Tensor & as_strided_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
25470 return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
25471}
25472namespace symint {
25473 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25474 at::Tensor & as_strided_copy_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
25475 return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
25476 }
25477}
25478
25479// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
25480inline at::Tensor & as_strided_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
25481 return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
25482}
25483namespace symint {
25484 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25485 at::Tensor & as_strided_copy_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
25486 return at::_ops::as_strided_copy_out::call(self, size, stride, storage_offset, out);
25487 }
25488}
25489
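// NOTE: the int64_t overloads of as_strided_copy above wrap a present
// storage_offset into a c10::SymInt (and lift size/stride through
// fromIntArrayRefSlow) before hitting the shared schema. A minimal sketch
// taking a 2x2 strided copy that starts at element 1 of a 10-element base:
//
//   at::Tensor base = at::arange(10, at::kFloat);
//   at::Tensor out = at::empty({2, 2});
//   at::as_strided_copy_out(out, base, {2, 2}, {4, 1}, /*storage_offset=*/1);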
25490// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
25491inline at::Tensor & _sparse_broadcast_to_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
25492 return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out);
25493}
25494// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
25495inline at::Tensor & _sparse_broadcast_to_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
25496 return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out);
25497}
25498
25499// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
25500inline at::Tensor & diagonal_copy_out(at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
25501 return at::_ops::diagonal_copy_out::call(self, offset, dim1, dim2, out);
25502}
25503// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
25504inline at::Tensor & diagonal_copy_outf(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
25505 return at::_ops::diagonal_copy_out::call(self, offset, dim1, dim2, out);
25506}
25507
25508// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
25509inline at::Tensor & expand_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) {
25510 return at::_ops::expand_copy_out::call(self, c10::fromIntArrayRefSlow(size), implicit, out);
25511}
25512namespace symint {
25513 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25514 at::Tensor & expand_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) {
25515 return at::_ops::expand_copy_out::call(self, c10::fromIntArrayRefSlow(size), implicit, out);
25516 }
25517}
25518
25519// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
25520inline at::Tensor & expand_copy_outf(const at::Tensor & self, at::IntArrayRef size, bool implicit, at::Tensor & out) {
25521 return at::_ops::expand_copy_out::call(self, c10::fromIntArrayRefSlow(size), implicit, out);
25522}
25523namespace symint {
25524 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25525 at::Tensor & expand_copy_outf(const at::Tensor & self, at::IntArrayRef size, bool implicit, at::Tensor & out) {
25526 return at::_ops::expand_copy_out::call(self, c10::fromIntArrayRefSlow(size), implicit, out);
25527 }
25528}
25529
25530// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
25531inline at::Tensor & expand_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) {
25532 return at::_ops::expand_copy_out::call(self, size, implicit, out);
25533}
25534namespace symint {
25535 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25536 at::Tensor & expand_copy_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) {
25537 return at::_ops::expand_copy_out::call(self, size, implicit, out);
25538 }
25539}
25540
25541// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
25542inline at::Tensor & expand_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
25543 return at::_ops::expand_copy_out::call(self, size, implicit, out);
25544}
25545namespace symint {
25546 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25547 at::Tensor & expand_copy_outf(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
25548 return at::_ops::expand_copy_out::call(self, size, implicit, out);
25549 }
25550}
25551
25552// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
25553inline at::Tensor & permute_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) {
25554 return at::_ops::permute_copy_out::call(self, dims, out);
25555}
25556// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
25557inline at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
25558 return at::_ops::permute_copy_out::call(self, dims, out);
25559}
25560
25561// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
25562inline at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
25563 return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
25564}
25565namespace symint {
25566 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25567 at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
25568 return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
25569 }
25570}
25571
25572// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
25573inline at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
25574 return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
25575}
25576namespace symint {
25577 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25578 at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
25579 return at::_ops::_reshape_alias_copy_out::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), out);
25580 }
25581}
25582
25583// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
25584inline at::Tensor & _reshape_alias_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
25585 return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
25586}
25587namespace symint {
25588 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25589 at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
25590 return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
25591 }
25592}
25593
25594// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
25595inline at::Tensor & _reshape_alias_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
25596 return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
25597}
25598namespace symint {
25599 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25600 at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
25601 return at::_ops::_reshape_alias_copy_out::call(self, size, stride, out);
25602 }
25603}
25604
25605// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
25606inline at::Tensor & select_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t index) {
25607 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25608}
25609namespace symint {
25610 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25611 at::Tensor & select_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t index) {
25612 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25613 }
25614}
25615
25616// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
25617inline at::Tensor & select_copy_outf(const at::Tensor & self, int64_t dim, int64_t index, at::Tensor & out) {
25618 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25619}
25620namespace symint {
25621 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25622 at::Tensor & select_copy_outf(const at::Tensor & self, int64_t dim, int64_t index, at::Tensor & out) {
25623 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25624 }
25625}
25626
25627// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
25628inline at::Tensor & select_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt index) {
25629 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25630}
25631namespace symint {
25632 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25633 at::Tensor & select_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt index) {
25634 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25635 }
25636}
25637
25638// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
25639inline at::Tensor & select_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
25640 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25641}
25642namespace symint {
25643 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25644 at::Tensor & select_copy_outf(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
25645 return at::_ops::select_copy_int_out::call(self, dim, index, out);
25646 }
25647}
25648
25649// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25650inline at::Tensor & detach_copy_out(at::Tensor & out, const at::Tensor & self) {
25651 return at::_ops::detach_copy_out::call(self, out);
25652}
25653// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25654inline at::Tensor & detach_copy_outf(const at::Tensor & self, at::Tensor & out) {
25655 return at::_ops::detach_copy_out::call(self, out);
25656}
25657
25658// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
25659inline at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
25660 return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
25661}
25662namespace symint {
25663 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25664 at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
25665 return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
25666 }
25667}
25668
25669// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
25670inline at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
25671 return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
25672}
25673namespace symint {
25674 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25675 at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
25676 return at::_ops::slice_copy_Tensor_out::call(self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
25677 }
25678}
25679
25680// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
25681inline at::Tensor & slice_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
25682 return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out);
25683}
25684namespace symint {
25685 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25686 at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
25687 return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out);
25688 }
25689}
25690
25691// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
25692inline at::Tensor & slice_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
25693 return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out);
25694}
25695namespace symint {
25696 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25697 at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
25698 return at::_ops::slice_copy_Tensor_out::call(self, dim, start, end, step, out);
25699 }
25700}
25701
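// Usage sketch (editorial, not generated): the int64_t slice_copy wrappers
// promote their optional start/end to c10::SymInt before dispatching, so the
// two calls below are equivalent. Assumes a 1-D tensor `src`:
//
//   at::Tensor src = at::arange(10);
//   at::Tensor out = at::empty({0}, src.options());
//   at::slice_copy_out(out, src, /*dim=*/0, /*start=*/2, /*end=*/8, /*step=*/2);
//   at::slice_copy_symint_out(out, src, 0, c10::SymInt(2), c10::SymInt(8), c10::SymInt(2));
//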
25702// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25703inline at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self) {
25704 return at::_ops::squeeze_copy_out::call(self, out);
25705}
25706// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25707inline at::Tensor & squeeze_copy_outf(const at::Tensor & self, at::Tensor & out) {
25708 return at::_ops::squeeze_copy_out::call(self, out);
25709}
25710
25711// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
25712inline at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
25713 return at::_ops::squeeze_copy_dim_out::call(self, dim, out);
25714}
25715// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
25716inline at::Tensor & squeeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
25717 return at::_ops::squeeze_copy_dim_out::call(self, dim, out);
25718}
25719
25720// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
25721inline at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
25722 return at::_ops::squeeze_copy_dims_out::call(self, dim, out);
25723}
25724// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
25725inline at::Tensor & squeeze_copy_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
25726 return at::_ops::squeeze_copy_dims_out::call(self, dim, out);
25727}
25728
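// Usage sketch (editorial, not generated): the three squeeze_copy.out
// overloads above are disambiguated by the type of the dim argument:
//
//   at::Tensor self = at::randn({1, 4, 1});
//   at::Tensor out = at::empty({0}, self.options());
//   at::squeeze_copy_out(out, self);                         // all size-1 dims
//   at::squeeze_copy_out(out, self, /*dim=*/0);              // a single dim
//   at::squeeze_copy_out(out, self, at::IntArrayRef{0, 2});  // several dims
//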
25729// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25730inline at::Tensor & t_copy_out(at::Tensor & out, const at::Tensor & self) {
25731 return at::_ops::t_copy_out::call(self, out);
25732}
25733// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25734inline at::Tensor & t_copy_outf(const at::Tensor & self, at::Tensor & out) {
25735 return at::_ops::t_copy_out::call(self, out);
25736}
25737
25738// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
25739inline at::Tensor & transpose_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
25740 return at::_ops::transpose_copy_int_out::call(self, dim0, dim1, out);
25741}
25742// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
25743inline at::Tensor & transpose_copy_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
25744 return at::_ops::transpose_copy_int_out::call(self, dim0, dim1, out);
25745}
25746
25747// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
25748inline at::Tensor & unsqueeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
25749 return at::_ops::unsqueeze_copy_out::call(self, dim, out);
25750}
25751// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
25752inline at::Tensor & unsqueeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
25753 return at::_ops::unsqueeze_copy_out::call(self, dim, out);
25754}
25755
25756// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25757inline at::Tensor & _indices_copy_out(at::Tensor & out, const at::Tensor & self) {
25758 return at::_ops::_indices_copy_out::call(self, out);
25759}
25760// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25761inline at::Tensor & _indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
25762 return at::_ops::_indices_copy_out::call(self, out);
25763}
25764
25765// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25766inline at::Tensor & _values_copy_out(at::Tensor & out, const at::Tensor & self) {
25767 return at::_ops::_values_copy_out::call(self, out);
25768}
25769// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25770inline at::Tensor & _values_copy_outf(const at::Tensor & self, at::Tensor & out) {
25771 return at::_ops::_values_copy_out::call(self, out);
25772}
25773
25774// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25775inline at::Tensor & indices_copy_out(at::Tensor & out, const at::Tensor & self) {
25776 return at::_ops::indices_copy_out::call(self, out);
25777}
25778// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25779inline at::Tensor & indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
25780 return at::_ops::indices_copy_out::call(self, out);
25781}
25782
25783// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25784inline at::Tensor & values_copy_out(at::Tensor & out, const at::Tensor & self) {
25785 return at::_ops::values_copy_out::call(self, out);
25786}
25787// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25788inline at::Tensor & values_copy_outf(const at::Tensor & self, at::Tensor & out) {
25789 return at::_ops::values_copy_out::call(self, out);
25790}
25791
25792// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25793inline at::Tensor & crow_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
25794 return at::_ops::crow_indices_copy_out::call(self, out);
25795}
25796// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25797inline at::Tensor & crow_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
25798 return at::_ops::crow_indices_copy_out::call(self, out);
25799}
25800
25801// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25802inline at::Tensor & col_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
25803 return at::_ops::col_indices_copy_out::call(self, out);
25804}
25805// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25806inline at::Tensor & col_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
25807 return at::_ops::col_indices_copy_out::call(self, out);
25808}
25809
25810// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25811inline at::Tensor & ccol_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
25812 return at::_ops::ccol_indices_copy_out::call(self, out);
25813}
25814// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25815inline at::Tensor & ccol_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
25816 return at::_ops::ccol_indices_copy_out::call(self, out);
25817}
25818
25819// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25820inline at::Tensor & row_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
25821 return at::_ops::row_indices_copy_out::call(self, out);
25822}
25823// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25824inline at::Tensor & row_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
25825 return at::_ops::row_indices_copy_out::call(self, out);
25826}
25827
25828// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
25829inline at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
25830 return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
25831}
25832namespace symint {
25833 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25834 at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
25835 return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
25836 }
25837}
25838
25839// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
25840inline at::Tensor & view_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
25841 return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
25842}
25843namespace symint {
25844 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25845 at::Tensor & view_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
25846 return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
25847 }
25848}
25849
25850// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
25851inline at::Tensor & view_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
25852 return at::_ops::view_copy_out::call(self, size, out);
25853}
25854namespace symint {
25855 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25856 at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
25857 return at::_ops::view_copy_out::call(self, size, out);
25858 }
25859}
25860
25861// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
25862inline at::Tensor & view_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
25863 return at::_ops::view_copy_out::call(self, size, out);
25864}
25865namespace symint {
25866 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25867 at::Tensor & view_copy_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
25868 return at::_ops::view_copy_out::call(self, size, out);
25869 }
25870}
25871
25872// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
25873inline at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::ScalarType dtype) {
25874 return at::_ops::view_copy_dtype_out::call(self, dtype, out);
25875}
25876// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
25877inline at::Tensor & view_copy_outf(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
25878 return at::_ops::view_copy_dtype_out::call(self, dtype, out);
25879}
25880
25881// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
25882inline at::Tensor & unfold_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
25883 return at::_ops::unfold_copy_out::call(self, dimension, size, step, out);
25884}
25885// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
25886inline at::Tensor & unfold_copy_outf(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
25887 return at::_ops::unfold_copy_out::call(self, dimension, size, step, out);
25888}
25889
25890// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25891inline at::Tensor & alias_copy_out(at::Tensor & out, const at::Tensor & self) {
25892 return at::_ops::alias_copy_out::call(self, out);
25893}
25894// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
25895inline at::Tensor & alias_copy_outf(const at::Tensor & self, at::Tensor & out) {
25896 return at::_ops::alias_copy_out::call(self, out);
25897}
25898
25899// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
25900inline at::Tensor & to_padded_tensor_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
25901 return at::_ops::to_padded_tensor_out::call(self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out);
25902}
25903namespace symint {
25904 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25905 at::Tensor & to_padded_tensor_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
25906 return at::_ops::to_padded_tensor_out::call(self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out);
25907 }
25908}
25909
25910// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
25911inline at::Tensor & to_padded_tensor_outf(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size, at::Tensor & out) {
25912 return at::_ops::to_padded_tensor_out::call(self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out);
25913}
25914namespace symint {
25915 template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
25916 at::Tensor & to_padded_tensor_outf(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size, at::Tensor & out) {
25917 return at::_ops::to_padded_tensor_out::call(self, padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt, out);
25918 }
25919}
25920
25921// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
25922inline at::Tensor & to_padded_tensor_symint_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt) {
25923 return at::_ops::to_padded_tensor_out::call(self, padding, output_size, out);
25924}
25925namespace symint {
25926 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25927 at::Tensor & to_padded_tensor_out(at::Tensor & out, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt) {
25928 return at::_ops::to_padded_tensor_out::call(self, padding, output_size, out);
25929 }
25930}
25931
25932// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
25933inline at::Tensor & to_padded_tensor_symint_outf(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
25934 return at::_ops::to_padded_tensor_out::call(self, padding, output_size, out);
25935}
25936namespace symint {
25937 template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
25938 at::Tensor & to_padded_tensor_outf(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
25939 return at::_ops::to_padded_tensor_out::call(self, padding, output_size, out);
25940 }
25941}
25942
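// Usage sketch (editorial, not generated): to_padded_tensor.out is primarily
// used with nested tensors; ragged entries are filled with `padding` up to
// `output_size` (or to the per-dimension maximum when output_size is
// c10::nullopt). Assuming `nested` is an existing nested tensor:
//
//   at::Tensor padded = at::empty({0});
//   at::to_padded_tensor_out(padded, nested, /*padding=*/0.0);
//   at::to_padded_tensor_outf(nested, 0.0, /*output_size=*/{2, 5}, padded);
//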
25943// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
25944inline at::Tensor & _transformer_encoder_layer_fwd_out(at::Tensor & out, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt) {
25945 return at::_ops::_transformer_encoder_layer_fwd_out::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
25946}
25947// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
25948inline at::Tensor & _transformer_encoder_layer_fwd_outf(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
25949 return at::_ops::_transformer_encoder_layer_fwd_out::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
25950}
25951
25952// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
25953inline ::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional<int64_t> mask_type=c10::nullopt) {
25954 return at::_ops::_native_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
25955}
25956// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
25957inline ::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
25958 return at::_ops::_native_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1);
25959}
25960
25961// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
25962inline at::Tensor & _triton_scaled_dot_attention_out(at::Tensor & out, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) {
25963 return at::_ops::_triton_scaled_dot_attention_out::call(q, k, v, dropout_p, out);
25964}
25965// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
25966inline at::Tensor & _triton_scaled_dot_attention_outf(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
25967 return at::_ops::_triton_scaled_dot_attention_out::call(q, k, v, dropout_p, out);
25968}
25969
25970// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
25971inline at::Tensor & _triton_multi_head_attention_out(at::Tensor & out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}) {
25972 return at::_ops::_triton_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
25973}
25974// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
25975inline at::Tensor & _triton_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) {
25976 return at::_ops::_triton_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
25977}
25978
25979// aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
25980inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}) {
25981 return at::_ops::_transformer_decoder_only_layer_fwd_out::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2);
25982}
25983// aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
25984inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_outf(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
25985 return at::_ops::_transformer_decoder_only_layer_fwd_out::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2);
25986}
25987
25988// aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
25989inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}, bool need_weights=true, bool average_attn_weights=true) {
25990 return at::_ops::_native_decoder_only_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights, out0, out1, out2, out3);
25991}
25992// aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
25993inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
25994 return at::_ops::_native_decoder_only_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights, out0, out1, out2, out3);
25995}
25996
25997// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
25998inline at::Tensor & _foobar_out(at::Tensor & out, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) {
25999 return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out);
26000}
26001// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
26002inline at::Tensor & _foobar_outf(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
26003 return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out);
26004}
26005
26006// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
26007inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
26008 return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
26009}
26010// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
26011inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
26012 return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
26013}
26014
26015// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
26016inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
26017 return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
26018}
26019
26020// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
26021inline void _fused_adamw_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
26022 return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
26023}
26024// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
26025inline void _fused_adamw_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
26026 return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
26027}
26028
26029// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
26030inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) {
26031 return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
26032}
26033
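// Usage sketch (editorial, not generated): the functional _fused_adam /
// _fused_adamw variants take parallel TensorLists and return updated copies
// instead of mutating in place. A minimal single-parameter sketch, assuming
// existing tensors p (param), g (grad), m, v (moments), and step, with
// hyperparameters chosen only for illustration:
//
//   std::vector<at::Tensor> params{p}, grads{g}, exp_avgs{m}, exp_avg_sqs{v};
//   std::vector<at::Tensor> max_exp_avg_sqs, state_steps{step};
//   auto [p2, g2, m2, v2, vmax2] = at::_fused_adam(
//       params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
//       /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999, /*weight_decay=*/0.0,
//       /*eps=*/1e-8, /*amsgrad=*/false, /*maximize=*/false);
//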
26034// Special C++ only overloads for std()-like functions (See gh-40287)
26035// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
26036// So, for example std(0) would select the std(unbiased=False) overload
26037TORCH_API inline Tensor var(const Tensor& self, int dim) {
26038 return at::var(self, IntArrayRef{dim});
26039}
26040TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
26041 return at::var_mean(self, IntArrayRef{dim});
26042}
26043TORCH_API inline Tensor std(const Tensor& self, int dim) {
26044 return at::std(self, IntArrayRef{dim});
26045}
26046TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
26047 return at::std_mean(self, IntArrayRef{dim});
26048}
26049
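// Illustration of the pitfall (editorial, not generated): without the
// overloads above, an int dim argument would convert to bool first:
//
//   at::Tensor t = at::randn({4, 5});
//   at::Tensor s = at::std(t, 0);  // with these overloads: std over dim 0
//   // ...otherwise 0 -> false would select std(self, /*unbiased=*/false),
//   // silently reducing over all elements instead of over dim 0.
//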
26050inline int64_t numel(const Tensor& tensor) {
26051 return tensor.numel();
26052}
26053
26054inline int64_t size(const Tensor& tensor, int64_t dim) {
26055 return tensor.size(dim);
26056}
26057
26058inline int64_t stride(const Tensor& tensor, int64_t dim) {
26059 return tensor.stride(dim);
26060}
26061
26062inline bool is_complex(const Tensor& tensor) {
26063 return tensor.is_complex();
26064}
26065
26066inline bool is_floating_point(const Tensor& tensor) {
26067 return tensor.is_floating_point();
26068}
26069
26070inline bool is_signed(const Tensor& tensor) {
26071 return tensor.is_signed();
26072}
26073
26074inline bool is_inference(const Tensor& tensor) {
26075 return tensor.is_inference();
26076}
26077
26078inline bool _is_zerotensor(const Tensor& tensor) {
26079 return tensor._is_zerotensor();
26080}
26081
26082inline bool is_conj(const Tensor& tensor) {
26083 return tensor.is_conj();
26084}
26085
26086inline Tensor conj(const Tensor& tensor) {
26087 return tensor.conj();
26088}
26089
26090inline bool is_neg(const Tensor& tensor) {
26091 return tensor.is_neg();
26092}
26093
26094}
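// Usage sketch (editorial, not part of the generated header): the helpers at
// the end of the namespace simply forward to the equivalent Tensor methods,
// giving generic code a uniform free-function spelling:
//
//   at::Tensor t = at::randn({2, 3});
//   int64_t n = at::numel(t);            // same as t.numel()
//   int64_t rows = at::size(t, 0);       // same as t.size(0)
//   bool fp = at::is_floating_point(t);  // same as t.is_floating_point()
//   at::Tensor c = at::conj(t);          // same as t.conj()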
Definition: Functions.h:16220
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps)
Definition: Functions.h:21407
at::Tensor & reflection_pad2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13844
::std::vector< at::Tensor > tensor_split(const at::Tensor &self, int64_t sections, int64_t dim=0)
Definition: Functions.h:1458
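A hedged usage sketch for at::tensor_split (our own demo code, assuming <ATen/ATen.h>):
#include <ATen/ATen.h>
#include <vector>
// Split a length-10 tensor into 3 near-equal sections (sizes 4, 3, 3);
// the pieces are views into the original storage.
std::vector<at::Tensor> tensor_split_demo() {
  return at::tensor_split(at::arange(10), /*sections=*/3, /*dim=*/0);
}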
at::Tensor & slow_conv3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0)
Definition: Functions.h:16656
at::Tensor layer_norm(const at::Tensor &input, at::IntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight={}, const c10::optional< at::Tensor > &bias={}, double eps=1e-05, bool cudnn_enable=true)
Definition: Functions.h:3834
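A minimal sketch of calling at::layer_norm with its defaults (demo only; the wrapper name is ours):
#include <ATen/ATen.h>
// Normalize over the trailing dimension of size 5; weight and bias default
// to undefined (no affine transform) and eps to 1e-05, per the signature above.
at::Tensor layer_norm_demo() {
  at::Tensor x = at::randn({2, 5});
  return at::layer_norm(x, /*normalized_shape=*/{5});
}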
at::Tensor & replication_pad3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14372
at::Tensor _pad_enum(const at::Tensor &self, at::IntArrayRef pad, int64_t mode, c10::optional< double > value=c10::nullopt)
Definition: Functions.h:14460
at::Tensor & replication_pad3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14306
at::Tensor reflection_pad1d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13756
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_backward_outf(const at::Tensor &grad_out, const at::Tensor &input, at::IntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21582
void unsafe_split_out(at::TensorList out, const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:22775
at::Tensor & diagonal_backward_outf(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor &out)
Definition: Functions.h:20623
at::Tensor & upsample_linear1d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:14845
at::Tensor & replication_pad2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14174
at::Tensor miopen_depthwise_convolution(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4782
at::Tensor select_copy(const at::Tensor &self, int64_t dim, int64_t index)
Definition: Functions.h:18791
at::Tensor nll_loss(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12428
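A hedged sketch pairing at::nll_loss with at::log_softmax, since nll_loss expects log-probabilities rather than raw logits (demo names are ours):
#include <ATen/ATen.h>
// Mean NLL over a batch of 3 samples and 5 classes.
at::Tensor nll_loss_demo() {
  at::Tensor logits = at::randn({3, 5});
  at::Tensor target = at::tensor({0, 2, 4}, at::kLong); // class indices
  return at::nll_loss(at::log_softmax(logits, /*dim=*/1), target);
}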
const at::Tensor & resize_out(const at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21094
at::Tensor & upsample_nearest2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15956
at::Tensor & new_empty_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:20867
at::Tensor & randint_out(at::Tensor &out, int64_t high, at::IntArrayRef size)
Definition: Functions.h:5892
at::Tensor & _nnpack_spatial_convolution_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride=1)
Definition: Functions.h:22102
at::Tensor & upsample_trilinear3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15428
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_out, const at::Tensor &input, at::IntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21571
void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:22819
at::Tensor & select_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim, int64_t index)
Definition: Functions.h:25611
c10::SymInt size(const TensorBase &t, int64_t dim)
Definition: TensorBase.h:959
at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16231
at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional)
Definition: Functions.h:188
at::Tensor & full_out(at::Tensor &out, at::IntArrayRef size, const at::Scalar &fill_value)
Definition: Functions.h:3238
at::Tensor & replication_pad1d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14119
at::Tensor _fft_c2c(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool forward)
Definition: Functions.h:3536
at::Tensor & reflection_pad1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13646
at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:5903
at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21965
at::Tensor & _fft_c2c_outf(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor &out)
Definition: Functions.h:3569
at::Tensor & conv_depthwise3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25178
at::Tensor & rand_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:5619
at::Tensor & slow_conv_dilated2d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:25211
at::Tensor _upsample_nearest_exact3d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14746
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_outf(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:20319
at::Tensor & set_out(at::Tensor &out, const at::Tensor &self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={})
Definition: Functions.h:23878
at::Tensor & diagonal_backward_out(at::Tensor &out, const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2)
Definition: Functions.h:20612
at::Tensor & replication_pad2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14251
at::Tensor & col2im_outf(const at::Tensor &self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:16843
at::Tensor to_padded_tensor(const at::Tensor &self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt)
Definition: Functions.h:19089
at::Tensor & mkldnn_convolution_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor &out)
Definition: Functions.h:21797
at::Tensor & randn_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:6262
at::Tensor & replication_pad1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14108
at::Tensor repeat(const at::Tensor &self, at::IntArrayRef repeats)
Definition: Functions.h:6489
at::Tensor & nll_loss2d_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &out)
Definition: Functions.h:12593
at::Tensor nll_loss2d_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12758
at::Tensor & set_outf(const at::Tensor &self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:23889
at::Tensor _adaptive_avg_pool3d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13358
at::Tensor convolution(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups)
Definition: Functions.h:1714
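A minimal sketch of the generic at::convolution entry point (illustrative; the shapes and wrapper name are our own choices):
#include <ATen/ATen.h>
// Plain 2-D convolution: stride 1, padding 1, no bias, not transposed.
at::Tensor convolution_demo() {
  at::Tensor input  = at::randn({1, 3, 8, 8});  // N, C_in, H, W
  at::Tensor weight = at::randn({4, 3, 3, 3});  // C_out, C_in, kH, kW
  return at::convolution(input, weight, /*bias=*/{}, /*stride=*/{1, 1},
                         /*padding=*/{1, 1}, /*dilation=*/{1, 1},
                         /*transposed=*/false, /*output_padding=*/{0, 0},
                         /*groups=*/1);
}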
at::Tensor _embedding_bag_sparse_backward(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, const at::Tensor &bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2548
at::Tensor & upsample_nearest3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16088
at::Tensor _nested_select_backward(const at::Tensor &grad_output, const at::Tensor &self, int64_t dim, int64_t index)
Definition: Functions.h:6827
at::Tensor _pad_circular(const at::Tensor &self, at::IntArrayRef pad)
Definition: Functions.h:14438
::std::tuple< at::Tensor &, at::Tensor & > nll_loss2d_forward_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &output, at::Tensor &total_weight)
Definition: Functions.h:12659
at::Tensor mkldnn_convolution(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:4696
at::Tensor upsample_bilinear2d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15010
at::Tensor _unsafe_view(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:7964
at::Tensor _embedding_bag_dense_backward(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2570
at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15307
at::Tensor & reflection_pad2d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13789
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21451
at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16264
::std::tuple< at::Tensor, at::Tensor, at::Tensor,::std::vector< at::Tensor > > _cudnn_rnn_backward(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:232
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_outf(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21418
at::Tensor slice_copy(const at::Tensor &self, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:18818
at::Tensor & embedding_dense_backward_outf(const at::Tensor &grad_output, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor &out)
Definition: Functions.h:20729
at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15373
at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16000
at::Tensor reflection_pad2d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13888
at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor &indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8804
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor &indices, const at::Tensor &values, at::Tensor &out)
Definition: Functions.h:23428
at::Tensor & embedding_out(at::Tensor &out, const at::Tensor &weight, const at::Tensor &indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false)
Definition: Functions.h:20674
at::Tensor & new_ones_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:21043
at::Tensor & nll_loss2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12714
at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15043
at::Tensor nll_loss2d(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12626
at::Tensor slow_conv_dilated2d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16788
at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15241
at::Tensor & upsample_bilinear2d_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:14911
at::Tensor & _unsafe_view_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:23054
::std::tuple< at::Tensor &, at::Tensor & > nll_loss2d_forward_out(at::Tensor &output, at::Tensor &total_weight, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index)
Definition: Functions.h:12648
at::Tensor & replication_pad2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14240
at::Tensor & upsample_nearest1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15560
at::Tensor & normal_out(at::Tensor &out, double mean, double std, at::IntArrayRef size, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:11524
at::Tensor & expand_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, bool implicit=false)
Definition: Functions.h:25514
at::Tensor upsample_linear1d(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14504
at::Tensor & slice_scatter_outf(const at::Tensor &self, const at::Tensor &src, int64_t dim, c10::optional< int64_t > start, c10::optional< int64_t > end, int64_t step, at::Tensor &out)
Definition: Functions.h:22645
at::Tensor rand(at::IntArrayRef size, c10::optional< at::DimnameList > names, at::TensorOptions options={})
Definition: Functions.h:5443
at::Tensor & upsample_bicubic2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15164
at::Tensor as_strided(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:900
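A cautionary usage sketch for at::as_strided (our demo; the aliasing note is the point):
#include <ATen/ATen.h>
// Expose overlapping length-3 windows of a 1-D tensor as a (6, 3) view.
// All windows alias the same storage, so in-place writes overlap.
at::Tensor as_strided_demo() {
  at::Tensor t = at::arange(8);
  return at::as_strided(t, /*size=*/{6, 3}, /*stride=*/{1, 1});
}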
at::Tensor cross_entropy_loss(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100, double label_smoothing=0.0)
Definition: Functions.h:10435
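A short sketch contrasting at::cross_entropy_loss with nll_loss above: this one takes raw logits and applies log_softmax internally (demo name is ours):
#include <ATen/ATen.h>
at::Tensor cross_entropy_demo() {
  at::Tensor logits = at::randn({3, 5});
  at::Tensor target = at::tensor({1, 0, 3}, at::kLong);
  return at::cross_entropy_loss(logits, target); // mean reduction by default
}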
at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional< at::Generator > generator=c10::nullopt, at::TensorOptions options={})
Definition: Functions.h:11480
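A one-line sketch of the size-based at::normal overload listed above (demo wrapper is ours):
#include <ATen/ATen.h>
// Draw a fresh (2, 3) tensor from N(0, 1) with the default generator.
at::Tensor normal_demo() {
  return at::normal(/*mean=*/0.0, /*std=*/1.0, /*size=*/{2, 3});
}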
at::Tensor replication_pad1d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14086
at::Tensor & reflection_pad2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13778
at::Tensor & upsample_nearest3d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:16099
at::Tensor conv_depthwise3d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16634
at::Tensor & convolution_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor &out)
Definition: Functions.h:20275
c10::SymIntArrayRef sizes(const TensorBase &t)
Definition: TensorBase.h:954
at::Tensor & _upsample_nearest_exact2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15868
at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:13191
at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15362
::std::vector< at::Tensor > unsafe_split_with_sizes(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:7274
at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16330
at::Tensor & to_padded_tensor_outf(const at::Tensor &self, double padding, at::OptionalIntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25916
at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor &out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional)
Definition: Functions.h:19912
at::Tensor & upsample_nearest1d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:15571
at::Tensor & convolution_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups)
Definition: Functions.h:20264
at::Tensor _upsample_nearest_exact1d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14658
at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15296
void _cudnn_rnn_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:20000
at::Tensor & reflection_pad1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13712
at::Tensor & upsample_trilinear3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15494
at::Tensor view(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:9435
at::Tensor & upsample_bicubic2d_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15175
at::Tensor upsample_bicubic2d(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14592
at::Tensor & adaptive_avg_pool3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13292
void split_with_sizes_copy_outf(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out)
Definition: Functions.h:19023
at::Tensor & embedding_dense_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq)
Definition: Functions.h:20718
at::Tensor & reflection_pad1d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13723
at::Tensor upsample_nearest2d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14680
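A hedged sketch of nearest-neighbour upsampling; the entry above is the scale-factor variant, while this sketch assumes the fixed-output-size overload of the same operator is also available in this build:
#include <ATen/ATen.h>
// Upsample a (1, 1, 4, 4) image to 8x8; the trailing nullopts are the
// optional per-axis scale hints.
at::Tensor upsample_nearest2d_demo() {
  at::Tensor x = at::randn({1, 1, 4, 4});
  return at::upsample_nearest2d(x, /*output_size=*/{8, 8}, c10::nullopt, c10::nullopt);
}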
at::Tensor & new_empty_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:20856
at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:15747
at::Tensor embedding_backward(const at::Tensor &grad, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse)
Definition: Functions.h:2433
at::Tensor replication_pad2d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14218
void _cudnn_rnn_backward_outf(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3)
Definition: Functions.h:20011
at::Tensor & reflection_pad3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13987
at::Tensor & miopen_convolution_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21877
at::Tensor & _unsafe_view_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:23043
at::Tensor _reshape_copy(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:6561
const at::Tensor & resize_outf(const at::Tensor &self, at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format, const at::Tensor &out)
Definition: Functions.h:21105
at::Tensor & slow_conv3d_forward_out(at::Tensor &output, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:16722
at::Tensor slow_conv_transpose2d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16438
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor &out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor &indices, const at::Tensor &values)
Definition: Functions.h:23417
at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor &out)
Definition: Functions.h:19923
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:20308
at::Tensor & nll_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12516
at::Tensor miopen_convolution_transpose(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4760
at::Tensor & conv_depthwise3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:25167
at::Tensor select(const at::Tensor &self, int64_t dim, int64_t index)
Definition: Functions.h:6783
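A minimal sketch for at::select (demo name ours): select drops the indexed dimension, unlike narrow with length 1:
#include <ATen/ATen.h>
// Pick row 2 of a 4x3 tensor as a (3,)-shaped view.
at::Tensor select_demo() {
  at::Tensor t = at::arange(12).reshape({4, 3});
  return at::select(t, /*dim=*/0, /*index=*/2);
}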
const at::Tensor & as_strided_(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:922
at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:16143
at::Tensor & as_strided_scatter_outf(const at::Tensor &self, const at::Tensor &src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset, at::Tensor &out)
Definition: Functions.h:22742
at::Tensor & replication_pad1d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14053
at::Tensor & slow_conv_transpose2d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16394
::std::tuple< at::Tensor, at::Tensor > nll_loss_forward(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index)
Definition: Functions.h:12494
::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_group_norm(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps)
Definition: Functions.h:3464
at::Tensor & slow_conv_dilated3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25266
at::Tensor _embedding_bag_backward(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2526
::std::vector< at::Tensor > split_with_sizes_copy(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:18862
at::Tensor narrow_copy(const at::Tensor &self, int64_t dim, int64_t start, int64_t length)
Definition: Functions.h:4947
at::Tensor & reflection_pad3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13976
at::Tensor upsample_trilinear3d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15538
::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _cudnn_rnn(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:210
at::Tensor & narrow_copy_outf(const at::Tensor &self, int64_t dim, int64_t start, int64_t length, at::Tensor &out)
Definition: Functions.h:4980
at::Tensor & embedding_outf(const at::Tensor &weight, const at::Tensor &indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor &out)
Definition: Functions.h:20685
const at::Tensor & _conv_depthwise2d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, const at::Tensor &out)
Definition: Functions.h:16579
at::Tensor embedding_dense_backward(const at::Tensor &grad_output, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq)
Definition: Functions.h:2455
at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8159
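A small sketch of passing at::TensorOptions to a factory like at::zeros; a bare dtype converts to it implicitly (demo name ours):
#include <ATen/ATen.h>
at::Tensor zeros_demo() {
  return at::zeros({2, 3}, at::kDouble); // double dtype on the default device
}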
at::Tensor & _convolution_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32)
Definition: Functions.h:20370
at::Tensor & reflection_pad2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13855
at::Tensor & _upsample_nearest_exact3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16132
at::Tensor & slow_conv_transpose2d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:16405
at::Tensor _conv_depthwise2d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16612
at::Tensor & slow_conv_transpose3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:16471
at::Tensor & new_empty_strided_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:20900
at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25063
at::Tensor & miopen_depthwise_convolution_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21954
at::Tensor & _reshape_alias_copy_outf(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:25578
at::Tensor & upsample_bilinear2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:14966
at::Tensor & _fft_c2c_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool forward)
Definition: Functions.h:3558
at::Tensor & miopen_convolution_transpose_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21910
at::Tensor & upsample_bicubic2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15230
at::Tensor & repeat_outf(const at::Tensor &self, at::IntArrayRef repeats, at::Tensor &out)
Definition: Functions.h:22477
const at::Tensor & resize_(const at::Tensor &self, at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:2804
at::Tensor & new_empty_strided_outf(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:20911
at::Tensor & new_full_outf(const at::Tensor &self, at::IntArrayRef size, const at::Scalar &fill_value, at::Tensor &out)
Definition: Functions.h:20955
at::Tensor upsample_nearest3d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16308
at::Tensor & reflection_pad3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13910
at::Tensor & reflection_pad1d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13657
at::Tensor & nll_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12362
at::Tensor miopen_convolution(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4738
at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar &fill_value, at::Tensor &out)
Definition: Functions.h:3249
at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15142
at::Tensor as_strided_copy(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:18710
at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:11535
at::Tensor expand_copy(const at::Tensor &self, at::IntArrayRef size, bool implicit=false)
Definition: Functions.h:18742
at::Tensor upsample_trilinear3d(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14570
at::Tensor _upsample_nearest_exact2d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14702
at::Tensor slow_conv_dilated3d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16810
at::Tensor & new_ones_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:21032
at::Tensor slice_backward(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step)
Definition: Functions.h:7063
at::Tensor & replication_pad1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14042
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _cudnn_rnn_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:19956
at::Tensor & nll_loss2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight, at::Tensor &grad_input)
Definition: Functions.h:12725
at::Tensor _upsample_bilinear2d_aa(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14548
at::Tensor nll_loss_nd(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12406
at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={})
Definition: Functions.h:2889
at::Tensor & empty_strided_outf(at::IntArrayRef size, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:21203
::std::vector< at::Tensor > split(const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:7230
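A minimal sketch for at::split (demo name ours):
#include <ATen/ATen.h>
#include <vector>
// Chunk a length-10 tensor into views of at most 4 elements (sizes 4, 4, 2);
// each piece aliases the original storage.
std::vector<at::Tensor> split_demo() {
  return at::split(at::arange(10), /*split_size=*/4, /*dim=*/0);
}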
at::Tensor new_empty(const at::Tensor &self, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:2646
void split_copy_outf(const at::Tensor &self, int64_t split_size, int64_t dim, at::TensorList out)
Definition: Functions.h:18979
at::Tensor & slow_conv_transpose3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16460
at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15967
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_backward_outf(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21462
at::Tensor & slice_scatter_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:22634
at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15032
at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16275
at::Tensor & slice_backward_outf(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step, at::Tensor &out)
Definition: Functions.h:22601
at::Tensor full(at::IntArrayRef size, const at::Scalar &fill_value, at::TensorOptions options={})
Definition: Functions.h:3194
at::Tensor & to_padded_tensor_out(at::Tensor &out, const at::Tensor &self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt)
Definition: Functions.h:25905
at::Tensor _pack_padded_sequence_backward(const at::Tensor &grad, at::IntArrayRef input_size, const at::Tensor &batch_sizes, bool batch_first)
Definition: Functions.h:9344
at::Tensor slice(const at::Tensor &self, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:7041
at::Tensor & empty_strided_out(at::Tensor &out, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:21192
at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:15615
at::Tensor new_ones(const at::Tensor &self, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:2758
at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15802
::std::vector< at::Tensor > split_copy(const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:18840
at::Tensor & slice_backward_out(at::Tensor &out, const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step)
Definition: Functions.h:22590
at::Tensor replication_pad1d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14152
at::Tensor & as_strided_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:25452
at::Tensor & miopen_convolution_transpose_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21921
void split_with_sizes_copy_out(at::TensorList out, const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:19012
at::Tensor index_select_backward(const at::Tensor &grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor &index)
Definition: Functions.h:10304
::std::vector< at::Tensor > unsafe_split(const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:7208
at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:15703
at::Tensor upsample_nearest1d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15780
at::Tensor adaptive_avg_pool3d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13336
at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:6273
at::Tensor embedding(const at::Tensor &weight, const at::Tensor &indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false)
Definition: Functions.h:2411
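A minimal sketch for at::embedding (demo name ours): a row-gather of the weight matrix by integral indices:
#include <ATen/ATen.h>
// Look up rows 1, 5, 9 of a (10, 4) table, giving a (3, 4) result.
at::Tensor embedding_demo() {
  at::Tensor weight = at::randn({10, 4});
  at::Tensor indices = at::tensor({1, 5, 9}, at::kLong);
  return at::embedding(weight, indices);
}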
at::Tensor & unfold_backward_out(at::Tensor &out, const at::Tensor &grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step)
Definition: Functions.h:24345
::std::tuple< at::Tensor &, at::Tensor & > nll_loss_forward_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &output, at::Tensor &total_weight)
Definition: Functions.h:12461
void split_copy_out(at::TensorList out, const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:18968
const at::Tensor & _conv_depthwise2d_out(const at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16568
at::Tensor & _upsample_nearest_exact1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15604
at::Tensor & slow_conv3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:16667
at::Tensor & narrow_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim, int64_t start, int64_t length)
Definition: Functions.h:4969
at::Tensor expand(const at::Tensor &self, at::IntArrayRef size, bool implicit=false)
Definition: Functions.h:3024
at::Tensor & slice_copy_outf(const at::Tensor &self, int64_t dim, c10::optional< int64_t > start, c10::optional< int64_t > end, int64_t step, at::Tensor &out)
Definition: Functions.h:25675
at::Tensor value_selecting_reduction_backward(const at::Tensor &grad, int64_t dim, const at::Tensor &indices, at::IntArrayRef sizes, bool keepdim)
Definition: Functions.h:4440
at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15736
at::Tensor _nnpack_spatial_convolution(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride=1)
Definition: Functions.h:5158
at::Tensor as_strided_scatter(const at::Tensor &self, const at::Tensor &src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:7134
at::Tensor trace_backward(const at::Tensor &grad, at::IntArrayRef sizes)
Definition: Functions.h:9918
at::Tensor adaptive_avg_pool2d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13224
at::Tensor _reshape_alias_copy(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:18769
at::Tensor select_backward(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index)
Definition: Functions.h:6805
at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15098
at::Tensor & select_copy_outf(const at::Tensor &self, int64_t dim, int64_t index, at::Tensor &out)
Definition: Functions.h:25622
at::Tensor & slow_conv3d_forward_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &output)
Definition: Functions.h:16733
at::Tensor new_empty_strided(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={})
Definition: Functions.h:2674
at::Tensor & ones_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:5244
at::Tensor unfold_backward(const at::Tensor &grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step)
Definition: Functions.h:11322
at::Tensor nll_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12560
at::Tensor & nll_loss_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &out)
Definition: Functions.h:12373
::std::tuple< at::Tensor &, at::Tensor & > nll_loss_forward_out(at::Tensor &output, at::Tensor &total_weight, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index)
Definition: Functions.h:12450
at::Tensor & new_full_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, const at::Scalar &fill_value)
Definition: Functions.h:20944
at::Tensor col2im(const at::Tensor &self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16876
at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:22113
at::Tensor set(const at::Tensor &self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={})
Definition: Functions.h:23922
at::Tensor & upsample_linear1d_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:14779
at::Tensor upsample_nearest3d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14724
at::Tensor & col2im_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16832
at::Tensor & as_strided_copy_outf(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset, at::Tensor &out)
Definition: Functions.h:25463
at::Tensor & unfold_backward_outf(const at::Tensor &grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor &out)
Definition: Functions.h:24356
::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_layer_norm_backward(const at::Tensor &grad_out, const at::Tensor &input, at::IntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:3878
at::Tensor reshape(const at::Tensor &self, at::IntArrayRef shape)
Definition: Functions.h:6539
::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_layer_norm(const at::Tensor &input, at::IntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, double eps)
Definition: Functions.h:3856
at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15505
at::Tensor repeat_interleave(const at::Tensor &self, int64_t repeats, c10::optional< int64_t > dim=c10::nullopt, c10::optional< int64_t > output_size=c10::nullopt)
Definition: Functions.h:6517
at::Tensor & replication_pad3d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14317
at::Tensor & _amp_update_scale_outf(const at::Tensor &self, at::Tensor &growth_tracker, const at::Tensor &found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor &out)
Definition: Functions.h:24411
at::Tensor log_normal(const at::Tensor &self, double mean=1, double std=2, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24217
at::Tensor & slice_copy_symint_out(at::Tensor &out, const at::Tensor &self, int64_t dim=0, c10::optional< c10::SymInt > start=c10::nullopt, c10::optional< c10::SymInt > end=c10::nullopt, c10::SymInt step=1)
Definition: Functions.h:25681
at::Tensor & index_put_(at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate=false)
Definition: Functions.h:3655
at::Tensor avg_pool3d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional< int64_t > divisor_override=c10::nullopt)
Definition: Functions.h:13482
at::Tensor broadcast_to_symint(const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:1305
at::Tensor & fmax_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:11050
at::Tensor & _histogramdd_from_bin_tensors_out(at::Tensor &out, const at::Tensor &self, at::TensorList bins, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:24313
at::Tensor ne(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9944
int64_t q_per_channel_axis(const at::Tensor &self)
Definition: Functions.h:9040
at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor &crow_indices, const at::Tensor &col_indices, bool out_int32, bool transpose, at::Tensor &out)
Definition: Functions.h:12249
at::Tensor masked_select(const at::Tensor &self, const at::Tensor &mask)
Definition: Functions.h:10330
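
A minimal usage sketch for at::masked_select (illustrative only; the include choice, demo function name, and sample values are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: boolean-mask gather into a flattened 1-D result.
void masked_select_demo() {
  at::Tensor t = at::arange(6).reshape({2, 3});       // {{0,1,2},{3,4,5}}
  at::Tensor picked = at::masked_select(t, t > 2);    // 1-D result: {3, 4, 5}
}
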
at::Tensor diagonal_backward(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2)
Definition: Functions.h:2207
at::Tensor & triu_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor &out)
Definition: Functions.h:24263
at::Tensor & lt_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10191
inline ::std::tuple< at::Tensor &, at::Tensor & > fake_quantize_per_channel_affine_cachemask_outf(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23759
at::Tensor & _reshape_alias_copy_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor &out)
Definition: Functions.h:25595
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_backward_symint_outf(const at::Tensor &grad_out, const at::Tensor &input, c10::SymIntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21599
at::Tensor & diagonal_backward_out(at::Tensor &out, const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2)
Definition: Functions.h:20607
at::Tensor fft_hfft2(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17623
at::Tensor slice_copy_symint(const at::Tensor &self, int64_t dim=0, c10::optional< c10::SymInt > start=c10::nullopt, c10::optional< c10::SymInt > end=c10::nullopt, c10::SymInt step=1)
Definition: Functions.h:18824
inline ::std::tuple< at::Tensor &, at::Tensor & > triangular_solve_out(at::Tensor &X, at::Tensor &M, const at::Tensor &self, const at::Tensor &A, bool upper=true, bool transpose=false, bool unitriangular=false)
Definition: Functions.h:10452
at::Tensor & special_bessel_y0_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19253
at::Tensor & _fake_quantize_learnable_per_tensor_affine_outf(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor &out)
Definition: Functions.h:23750
inline ::std::vector< at::Tensor > chunk(const at::Tensor &self, int64_t chunks, int64_t dim=0)
Definition: Functions.h:1448
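
A minimal usage sketch for at::chunk (illustrative only; include and names are assumptions):
#include <ATen/ATen.h>
#include <vector>
// Hypothetical demo: the last chunk may be smaller when sizes don't divide evenly.
void chunk_demo() {
  // 10 elements into 3 chunks along dim 0 -> pieces of size 4, 4, 2.
  std::vector<at::Tensor> parts = at::chunk(at::arange(10), /*chunks=*/3);
}
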
at::Tensor _mkldnn_transpose(const at::Tensor &self, int64_t dim0, int64_t dim1)
Definition: Functions.h:7786
at::Tensor linalg_vander(const at::Tensor &x, c10::optional< int64_t > N=c10::nullopt)
Definition: Functions.h:10485
inline ::std::tuple< at::Tensor &, at::Tensor & > _pack_padded_sequence_outf(const at::Tensor &input, const at::Tensor &lengths, bool batch_first, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23854
at::Tensor _conj_copy(const at::Tensor &self)
Definition: Functions.h:18695
at::Tensor narrow(const at::Tensor &self, int64_t dim, int64_t start, int64_t length)
Definition: Functions.h:5008
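
A minimal usage sketch for at::narrow (illustrative only; include and names are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: narrow returns a view, not a copy.
void narrow_demo() {
  at::Tensor t = at::arange(12).reshape({3, 4});
  // Columns 1..2 (start=1, length=2): shape {3, 2}, shares storage with t.
  at::Tensor mid = at::narrow(t, /*dim=*/1, /*start=*/1, /*length=*/2);
}
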
at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:22108
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _transform_bias_rescale_qkv(const at::Tensor &qkv, const at::Tensor &qkv_bias, int64_t num_heads)
Definition: Functions.h:7846
at::Tensor flatten(const at::Tensor &self, int64_t start_dim=0, int64_t end_dim=-1)
Definition: Functions.h:3073
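
A minimal usage sketch for at::flatten (illustrative only; include and names are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: flatten collapses dims start_dim..end_dim into one.
void flatten_demo() {
  at::Tensor x = at::zeros({2, 3, 4});
  at::Tensor tail = at::flatten(x, /*start_dim=*/1);  // shape {2, 12}
  at::Tensor all = at::flatten(x);                    // shape {24}
}
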
at::Tensor & avg_pool3d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override, at::Tensor &out)
Definition: Functions.h:13477
at::Tensor view_copy_symint(const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:19062
at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16325
at::Tensor & isposinf_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:16936
at::Tensor & _pin_memory_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::Device > device=c10::nullopt)
Definition: Functions.h:22231
at::Tensor & empty_like_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21178
at::Tensor full(at::IntArrayRef size, const at::Scalar &fill_value, c10::optional< at::DimnameList > names, at::TensorOptions options={})
Definition: Functions.h:3180
at::Tensor expand_copy_symint(const at::Tensor &self, c10::SymIntArrayRef size, bool implicit=false)
Definition: Functions.h:18748
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:20303
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_symint_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input, c10::SymIntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, double eps)
Definition: Functions.h:21544
at::Tensor & linalg_matrix_exp_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25303
at::Tensor & where_out(at::Tensor &out, const at::Tensor &condition, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:8082
at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor &out, const at::Tensor &crow_indices, const at::Tensor &col_indices, bool out_int32=false, bool transpose=false)
Definition: Functions.h:12245
at::Tensor & upsample_bicubic2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15181
at::Tensor & absolute_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:381
at::Tensor & permute_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dims)
Definition: Functions.h:25553
at::Tensor & _upsample_nearest_exact2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15863
at::Tensor embedding_renorm(const at::Tensor &self, const at::Tensor &indices, double max_norm, double norm_type)
Definition: Functions.h:20766
void _foreach_log_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24772
at::Tensor & subtract_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:8537
at::Tensor fft_ifft(const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17497
at::Tensor & special_gammaln_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17072
at::Tensor & trunc_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:7905
at::Tensor fft_rfftfreq(int64_t n, double d=1.0, at::TensorOptions options={})
Definition: Functions.h:17753
at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor &out)
Definition: Functions.h:7414
at::Tensor & linalg_svdvals_outf(const at::Tensor &A, c10::optional< c10::string_view > driver, at::Tensor &out)
Definition: Functions.h:18323
at::Tensor & trace_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:24272
at::Tensor & _fft_r2c_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool onesided)
Definition: Functions.h:3508
at::Tensor & hardtanh_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &min_val, const at::Scalar &max_val)
Definition: Functions.h:12977
at::Tensor xlogy(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4134
at::Tensor & fft_fft2_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17576
at::Tensor & _softmax_outf(const at::Tensor &self, int64_t dim, bool half_to_float, at::Tensor &out)
Definition: Functions.h:7184
at::Tensor corrcoef(const at::Tensor &self)
Definition: Functions.h:1945
at::Tensor cholesky_solve(const at::Tensor &self, const at::Tensor &input2, bool upper=false)
Definition: Functions.h:10537
inline ::std::vector< at::Tensor > split_with_sizes_symint(const at::Tensor &self, c10::SymIntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:7302
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > unique_dim_consecutive(const at::Tensor &self, int64_t dim, bool return_inverse=false, bool return_counts=false)
Definition: Functions.h:7949
at::Tensor & lcm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3314
at::Tensor concatenate(at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:1377
at::Tensor arctanh(const at::Tensor &self)
Definition: Functions.h:876
at::Tensor atleast_2d(const at::Tensor &self)
Definition: Functions.h:1025
inline ::std::tuple< at::Tensor, at::Tensor > _unpack_dual(const at::Tensor &dual, int64_t level)
Definition: Functions.h:128
at::Tensor & addmv_(at::Tensor &self, const at::Tensor &mat, const at::Tensor &vec, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:589
at::Tensor & to_sparse_csc_outf(const at::Tensor &self, c10::optional< int64_t > dense_dim, at::Tensor &out)
Definition: Functions.h:23570
at::Tensor & mul_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4881
at::Tensor & unfold_backward_outf(const at::Tensor &grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor &out)
Definition: Functions.h:24351
void _foreach_lgamma_(at::TensorList self)
Definition: Functions.h:12058
void _foreach_minimum_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11653
bool __dispatch_is_conj(const at::Tensor &self)
Definition: Functions.h:3742
at::Tensor & reciprocal_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6445
at::Tensor & _make_dual_copy_outf(const at::Tensor &primal, const at::Tensor &tangent, int64_t level, at::Tensor &out)
Definition: Functions.h:25406
at::Tensor & slice_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor &out)
Definition: Functions.h:22618
at::Tensor & unfold_backward_out(at::Tensor &out, const at::Tensor &grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step)
Definition: Functions.h:24340
at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &indices, at::Tensor &grad_input)
Definition: Functions.h:13435
at::Tensor linalg_pinv(const at::Tensor &self, const c10::optional< at::Tensor > &atol={}, const c10::optional< at::Tensor > &rtol={}, bool hermitian=false)
Definition: Functions.h:18356
at::Tensor _standard_gamma(const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:8256
at::Tensor & baddbmm_outf(const at::Tensor &self, const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:1054
at::Tensor & clip_(at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max=c10::nullopt)
Definition: Functions.h:1626
at::Tensor trapezoid(const at::Tensor &y, const at::Tensor &x, int64_t dim=-1)
Definition: Functions.h:7826
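
A minimal usage sketch for at::trapezoid, which integrates sampled y values over x by the trapezoidal rule (illustrative only; include, names, and sample values are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: integrate f(x) = x^2 sampled at x = 0, 1, 2.
void trapezoid_demo() {
  at::Tensor x = at::tensor(at::ArrayRef<double>{0.0, 1.0, 2.0});
  at::Tensor y = x * x;
  // Trapezoidal rule: (0+1)/2 * 1 + (1+4)/2 * 1 = 3.0 (exact integral is 8/3).
  at::Tensor area = at::trapezoid(y, x);
}
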
at::Tensor exp2(const at::Tensor &self)
Definition: Functions.h:2985
at::Tensor rsqrt(const at::Tensor &self)
Definition: Functions.h:6754
at::Tensor & _triton_multi_head_attention_out(at::Tensor &out, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask={})
Definition: Functions.h:25971
at::Tensor & binomial_outf(const at::Tensor &count, const at::Tensor &prob, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:23185
at::Tensor & ceil_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:1420
at::Tensor fft_irfftn(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17693
inline ::std::vector< at::Tensor > _foreach_atan(at::TensorList self)
Definition: Functions.h:11873
at::Tensor convolution_overrideable(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups)
Definition: Functions.h:1753
at::Tensor & glu_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, int64_t dim, at::Tensor &grad_input)
Definition: Functions.h:12910
at::Tensor & bitwise_not_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:1168
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_ldl_factor_out(at::Tensor &LD, at::Tensor &pivots, const at::Tensor &self, bool hermitian=false)
Definition: Functions.h:17931
at::Tensor full_like(const at::Tensor &self, const at::Scalar &fill_value, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:3277
at::Tensor dequantize(const at::Tensor &self)
Definition: Functions.h:9010
at::Tensor & special_exp2_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17030
void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale={}, const c10::optional< at::Tensor > &found_inf={})
Definition: Functions.h:26007
inline ::std::tuple< at::Tensor &, at::Tensor & > grid_sampler_3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array< bool, 2 > output_mask, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:21316
void unsafe_split_outf(const at::Tensor &self, int64_t split_size, int64_t dim, at::TensorList out)
Definition: Functions.h:22781
at::Tensor & elu_backward_outf(const at::Tensor &grad_output, const at::Scalar &alpha, const at::Scalar &scale, const at::Scalar &input_scale, bool is_result, const at::Tensor &self_or_result, at::Tensor &grad_input)
Definition: Functions.h:12877
at::Tensor & sparse_coo_tensor_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:23394
at::Tensor & concatenate_outf(at::TensorList tensors, int64_t dim, at::Tensor &out)
Definition: Functions.h:1386
inline ::std::tuple< at::Tensor, at::Tensor > kthvalue(const at::Tensor &self, int64_t k, int64_t dim=-1, bool keepdim=false)
Definition: Functions.h:3801
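
A minimal usage sketch for at::kthvalue (illustrative only; include, names, and sample values are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: kthvalue returns the k-th *smallest* value along dim.
void kthvalue_demo() {
  at::Tensor t = at::tensor(at::ArrayRef<int64_t>{7, 2, 9, 4});
  // Sorted order is 2, 4, 7, 9, so k=2 yields value 4 at index 3.
  auto [value, index] = at::kthvalue(t, /*k=*/2);
}
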
at::Tensor & cumsum_outf(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:2123
at::Tensor & lgamma_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:10664
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_solve_ex_outf(const at::Tensor &A, const at::Tensor &B, bool left, bool check_errors, at::Tensor &result, at::Tensor &info)
Definition: Functions.h:18435
at::Tensor & softplus_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &beta, const at::Scalar &threshold)
Definition: Functions.h:13133
void _validate_sparse_csc_tensor_args(const at::Tensor &ccol_indices, const at::Tensor &row_indices, const at::Tensor &values, at::IntArrayRef size)
Definition: Functions.h:8858
at::Tensor & _amp_update_scale_(at::Tensor &self, at::Tensor &growth_tracker, const at::Tensor &found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval)
Definition: Functions.h:11573
at::Tensor & row_stack_outf(at::TensorList tensors, at::Tensor &out)
Definition: Functions.h:2501
at::Tensor _upsample_bicubic2d_aa(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14609
at::Tensor floor_divide(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3142
at::Tensor & special_i1_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17314
at::Tensor & slice_copy_symint_outf(const at::Tensor &self, int64_t dim, c10::optional< c10::SymInt > start, c10::optional< c10::SymInt > end, c10::SymInt step, at::Tensor &out)
Definition: Functions.h:25692
at::Tensor convolution_symint(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups)
Definition: Functions.h:1720
at::Tensor & empty_out(at::Tensor &out, at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:2831
at::Tensor set_symint(const at::Tensor &self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={})
Definition: Functions.h:23928
at::Tensor _convolution(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32)
Definition: Functions.h:1763
at::Tensor & select_backward_symint_out(at::Tensor &out, const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index)
Definition: Functions.h:22554
at::Tensor & _unsafe_view_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:23038
at::Tensor detach_copy(const at::Tensor &self)
Definition: Functions.h:18808
at::Tensor & rrelu_with_noise_outf(const at::Tensor &self, const at::Tensor &noise, const at::Scalar &lower, const at::Scalar &upper, bool training, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:13099
at::Tensor _stack(at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:7391
at::Tensor rand(at::IntArrayRef size, c10::optional< at::DimnameList > names, at::TensorOptions options={})
Definition: Functions.h:5438
at::Tensor special_legendre_polynomial_p(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19566
at::Tensor & silu_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6878
at::Tensor avg_pool1d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true)
Definition: Functions.h:521
at::Tensor & concatenate_out(at::Tensor &out, at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:1382
at::Tensor special_xlog1py(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17151
at::Tensor flipud(const at::Tensor &self)
Definition: Functions.h:7811
at::Tensor & miopen_convolution_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21883
at::Tensor & reflection_pad2d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13784
at::Tensor & rsqrt_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6764
at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor &indices)
Definition: Functions.h:13599
at::Tensor & log10_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:4063
at::Tensor & index_copy_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source, at::Tensor &out)
Definition: Functions.h:3640
at::Tensor & _histogramdd_from_bin_cts_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef bins, c10::optional< at::ArrayRef< double > > range=c10::nullopt, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:24304
at::Tensor & scatter_reduce_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src, c10::string_view reduce, bool include_self, at::Tensor &out)
Definition: Functions.h:9600
at::Tensor & slow_conv3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:16662
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > unique_dim_outf(const at::Tensor &self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:23006
at::Tensor & linspace_out(at::Tensor &out, const at::Scalar &start, const at::Scalar &end, int64_t steps)
Definition: Functions.h:4021
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _fused_moving_avg_obs_fq_helper_functional(const at::Tensor &self, const at::Tensor &observer_on, const at::Tensor &fake_quant_on, const at::Tensor &running_min, const at::Tensor &running_max, const at::Tensor &scale, const at::Tensor &zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false)
Definition: Functions.h:23782
at::Tensor arctan(const at::Tensor &self)
Definition: Functions.h:996
at::Tensor & upsample_linear1d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:14840
at::Tensor & soft_margin_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12831
at::Tensor & _fft_c2c_outf(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor &out)
Definition: Functions.h:3564
at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:8209
at::Tensor complex(const at::Tensor &real, const at::Tensor &imag)
Definition: Functions.h:1659
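
A minimal usage sketch for at::complex (illustrative only; include, names, and sample values are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: pair real and imaginary parts of matching float dtype.
void complex_demo() {
  at::Tensor re = at::tensor(at::ArrayRef<float>{1.f, 0.f});
  at::Tensor im = at::tensor(at::ArrayRef<float>{0.f, 1.f});
  at::Tensor z = at::complex(re, im);  // kComplexFloat: {1+0i, 0+1i}
}
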
at::Tensor quantile(const at::Tensor &self, const at::Tensor &q, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear")
Definition: Functions.h:11120
at::Tensor & atanh_(at::Tensor &self)
Definition: Functions.h:862
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > linear_backward(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:3919
inline ::std::vector< at::Tensor > _foreach_sub(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11588
at::Tensor & hardsigmoid_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::Tensor &grad_input)
Definition: Functions.h:12953
at::Tensor & exp2_(at::Tensor &self)
Definition: Functions.h:2990
at::Tensor & histc_out(at::Tensor &out, const at::Tensor &self, int64_t bins=100, const at::Scalar &min=0, const at::Scalar &max=0)
Definition: Functions.h:10828
at::Tensor reciprocal(const at::Tensor &self)
Definition: Functions.h:6431
at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={})
Definition: Functions.h:2884
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _slow_conv2d_backward_out(at::Tensor &grad_input, at::Tensor &grad_weight, at::Tensor &grad_bias, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:16549
inline ::std::tuple< at::Tensor, at::Tensor > _pad_packed_sequence(const at::Tensor &data, const at::Tensor &batch_sizes, bool batch_first, const at::Scalar &padding_value, int64_t total_length)
Definition: Functions.h:9361
at::Tensor & special_modified_bessel_i1_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19631
at::Tensor & upsample_nearest3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16083
at::Tensor & multilabel_margin_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12315
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > svd_outf(const at::Tensor &self, bool some, bool compute_uv, at::Tensor &U, at::Tensor &S, at::Tensor &V)
Definition: Functions.h:10494
at::Tensor & linear_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={})
Definition: Functions.h:3924
at::Tensor & hardshrink_backward_outf(const at::Tensor &grad_out, const at::Tensor &self, const at::Scalar &lambd, at::Tensor &grad_input)
Definition: Functions.h:6744
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input, at::IntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, double eps)
Definition: Functions.h:21522
at::Tensor randint_like(const at::Tensor &self, int64_t high, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:6063
at::Tensor & conj_physical_(at::Tensor &self)
Definition: Functions.h:463
at::Tensor & binary_cross_entropy_with_logits_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &pos_weight, int64_t reduction, at::Tensor &out)
Definition: Functions.h:20174
at::Tensor & scalar_tensor_outf(const at::Scalar &s, at::Tensor &out)
Definition: Functions.h:22244
at::Tensor & argsort_out(at::Tensor &out, const at::Tensor &self, bool stable, int64_t dim=-1, bool descending=false)
Definition: Functions.h:24331
at::Tensor hardshrink_backward(const at::Tensor &grad_out, const at::Tensor &self, const at::Scalar &lambd)
Definition: Functions.h:6749
inline ::std::tuple<::std::vector< at::Tensor >,::std::vector< at::Tensor >,::std::vector< at::Tensor >,::std::vector< at::Tensor >,::std::vector< at::Tensor > > _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale={}, const c10::optional< at::Tensor > &found_inf={})
Definition: Functions.h:26016
at::Tensor & arange_out(at::Tensor &out, const at::Scalar &end)
Definition: Functions.h:730
at::Tensor _pack_padded_sequence_backward(const at::Tensor &grad, at::IntArrayRef input_size, const at::Tensor &batch_sizes, bool batch_first)
Definition: Functions.h:9339
inline ::std::tuple< at::Tensor, at::Tensor > log_sigmoid_forward(const at::Tensor &self)
Definition: Functions.h:13076
at::Tensor & __lshift___out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:24084
at::Tensor & matmul_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4336
at::Tensor & lu_solve_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &LU_data, const at::Tensor &LU_pivots)
Definition: Functions.h:10622
at::Tensor & rot90_outf(const at::Tensor &self, int64_t k, at::IntArrayRef dims, at::Tensor &out)
Definition: Functions.h:22916
at::Tensor & upsample_bicubic2d_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15170
at::Tensor & as_strided_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:25447
at::Tensor & celu_(at::Tensor &self, const at::Scalar &alpha=1.0)
Definition: Functions.h:6859
at::Tensor real(const at::Tensor &self)
Definition: Functions.h:424
at::Tensor & square_(at::Tensor &self)
Definition: Functions.h:7548
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss2d_forward_symint_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor &output, at::Tensor &total_weight)
Definition: Functions.h:12676
at::Tensor replication_pad1d_symint(const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14092
at::Tensor mvlgamma(const at::Tensor &self, int64_t p)
Definition: Functions.h:4937
at::Tensor & nll_loss2d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12577
at::Tensor & slice_backward_outf(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step, at::Tensor &out)
Definition: Functions.h:22596
at::Tensor _cdist_forward(const at::Tensor &x1, const at::Tensor &x2, double p, c10::optional< int64_t > compute_mode)
Definition: Functions.h:5296
inline ::std::tuple< at::Tensor, at::Tensor > adaptive_max_pool1d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:531
at::Tensor cumsum(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:2114
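
A minimal usage sketch for at::cumsum (illustrative only; include and names are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: running sum along a dimension.
void cumsum_demo() {
  at::Tensor c = at::cumsum(at::arange(1, 5), /*dim=*/0);  // {1, 3, 6, 10}
}
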
inline ::std::tuple< at::Tensor &, at::Tensor & > _linalg_eigh_outf(const at::Tensor &A, c10::string_view UPLO, bool compute_v, at::Tensor &eigenvalues, at::Tensor &eigenvectors)
Definition: Functions.h:18085
at::Tensor cat(const at::ITensorListRef &tensors, int64_t dim=0)
Definition: Functions.h:1321
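
A minimal usage sketch for at::cat (illustrative only; include and names are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: concatenate along dim 0; all other dims must match.
void cat_demo() {
  at::Tensor a = at::ones({2, 3});
  at::Tensor b = at::zeros({2, 3});
  at::Tensor ab = at::cat({a, b}, /*dim=*/0);  // shape {4, 3}
}
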
at::Tensor & fft_rfftfreq_outf(int64_t n, double d, at::Tensor &out)
Definition: Functions.h:17766
at::Tensor & _mkldnn_reshape_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef shape)
Definition: Functions.h:22514
at::Tensor & upsample_bicubic2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15225
at::Tensor mkldnn_linear(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={})
Definition: Functions.h:3933
at::Tensor quantized_rnn_relu_cell(const at::Tensor &input, const at::Tensor &hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const at::Tensor &b_ih, const at::Tensor &b_hh, const at::Tensor &packed_ih, const at::Tensor &packed_hh, const at::Tensor &col_offsets_ih, const at::Tensor &col_offsets_hh, const at::Scalar &scale_ih, const at::Scalar &scale_hh, const at::Scalar &zero_point_ih, const at::Scalar &zero_point_hh)
Definition: Functions.h:9324
inline ::std::tuple< at::Tensor &, at::Tensor & > frexp_out(at::Tensor &mantissa, at::Tensor &exponent, const at::Tensor &self)
Definition: Functions.h:8442
at::Tensor log10(const at::Tensor &self)
Definition: Functions.h:4049
at::Tensor _standard_gamma_grad(const at::Tensor &self, const at::Tensor &output)
Definition: Functions.h:8251
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss2d_forward_out(at::Tensor &output, at::Tensor &total_weight, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index)
Definition: Functions.h:12643
bool _use_cudnn_ctc_loss(const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank)
Definition: Functions.h:158
at::Tensor & special_psi_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17044
at::Tensor & _to_copy_out(at::Tensor &out, const at::Tensor &self, bool non_blocking=false, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:23787
at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15456
at::Tensor & full_out(at::Tensor &out, at::IntArrayRef size, const at::Scalar &fill_value)
Definition: Functions.h:3233
at::Tensor & all_outf(const at::Tensor &self, int64_t dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:651
at::Tensor softplus_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &beta, const at::Scalar &threshold)
Definition: Functions.h:13142
at::Tensor leaky_relu(const at::Tensor &self, const at::Scalar &negative_slope=0.01)
Definition: Functions.h:13029
at::Tensor & hspmm_outf(const at::Tensor &mat1, const at::Tensor &mat2, at::Tensor &out)
Definition: Functions.h:8945
void _foreach_addcdiv_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value=1)
Definition: Functions.h:24903
at::Tensor & multi_margin_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const at::Scalar &p, const at::Scalar &margin, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12301
at::Tensor & addbmm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:9820
void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale={}, const c10::optional< at::Tensor > &found_inf={})
Definition: Functions.h:19879
at::Tensor index_select_backward(const at::Tensor &grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor &index)
Definition: Functions.h:10299
void _foreach_zero_(at::TensorList self)
Definition: Functions.h:11823
at::Tensor & fft_ifft_outf(const at::Tensor &self, c10::optional< int64_t > n, int64_t dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17506
at::Tensor & constant_pad_nd_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef pad, const at::Scalar &value=0)
Definition: Functions.h:20215
at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14554
at::Tensor random(const at::Tensor &self, int64_t from, c10::optional< int64_t > to, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24147
at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15995
at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor &self, const at::Tensor &observer_on, const at::Tensor &fake_quant_on, at::Tensor &running_min, at::Tensor &running_max, at::Tensor &scale, at::Tensor &zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false)
Definition: Functions.h:9120
at::Tensor & new_empty_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:20884
at::Tensor dot(const at::Tensor &self, const at::Tensor &tensor)
Definition: Functions.h:2373
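
A minimal usage sketch for at::dot (illustrative only; include, names, and sample values are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: inner product of two 1-D tensors returns a 0-dim tensor.
void dot_demo() {
  at::Tensor u = at::tensor(at::ArrayRef<float>{1.f, 2.f});
  at::Tensor v = at::tensor(at::ArrayRef<float>{3.f, 4.f});
  at::Tensor d = at::dot(u, v);  // 1*3 + 2*4 = 11
}
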
at::Tensor _sparse_log_softmax(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:8346
at::Tensor & linalg_norm_out(at::Tensor &out, const at::Tensor &self, const c10::optional< at::Scalar > &ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:18226
void _histogramdd_bin_edges_out(at::TensorList out, const at::Tensor &self, at::IntArrayRef bins, c10::optional< at::ArrayRef< double > > range=c10::nullopt, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:24295
at::Tensor & _reshape_alias_copy_outf(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:25573
at::Tensor vander(const at::Tensor &x, c10::optional< int64_t > N=c10::nullopt, bool increasing=false)
Definition: Functions.h:7986
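
A minimal usage sketch for at::vander (illustrative only; include, names, and sample values are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: Vandermonde matrix with default N = x.size(0) and
// increasing=false (highest power first).
void vander_demo() {
  at::Tensor x = at::tensor(at::ArrayRef<double>{1.0, 2.0, 3.0});
  at::Tensor V = at::vander(x);  // {{1,1,1},{4,2,1},{9,3,1}}
}
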
at::Tensor repeat_interleave(const at::Tensor &repeats, c10::optional< int64_t > output_size=c10::nullopt)
Definition: Functions.h:6502
inline ::std::tuple< at::Tensor &, at::Tensor & > _fused_moving_avg_obs_fq_helper_outf(const at::Tensor &self, const at::Tensor &observer_on, const at::Tensor &fake_quant_on, at::Tensor &running_min, at::Tensor &running_max, at::Tensor &scale, at::Tensor &zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23777
inline ::std::tuple< at::Tensor &, at::Tensor & > fractional_max_pool2d_out(at::Tensor &output, at::Tensor &indices, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &random_samples)
Definition: Functions.h:13501
at::Tensor & fmin_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11027
at::Tensor & new_zeros_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:20994
at::Tensor cosine_embedding_loss(const at::Tensor &input1, const at::Tensor &input2, const at::Tensor &target, double margin=0.0, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:1925
at::Tensor & view_as_complex_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25424
at::Tensor _conj(const at::Tensor &self)
Definition: Functions.h:434
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss_forward_symint_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor &output, at::Tensor &total_weight)
Definition: Functions.h:12478
at::Tensor _reshape_copy_symint(const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:6567
at::Tensor & nll_loss2d_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor &total_weight, at::Tensor &grad_input)
Definition: Functions.h:12742
at::Tensor & _segment_reduce_backward_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &output, const at::Tensor &data, c10::string_view reduce, const c10::optional< at::Tensor > &lengths={}, const c10::optional< at::Tensor > &offsets={}, int64_t axis=0, const c10::optional< at::Scalar > &initial=c10::nullopt)
Definition: Functions.h:25375
at::Tensor & pow_outf(const at::Tensor &self, const at::Tensor &exponent, at::Tensor &out)
Definition: Functions.h:11348
at::Tensor & fft_hfft_outf(const at::Tensor &self, c10::optional< int64_t > n, int64_t dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17548
at::Tensor special_logit(const at::Tensor &self, c10::optional< double > eps=c10::nullopt)
Definition: Functions.h:17333
at::Tensor & any_outf(const at::Tensor &self, int64_t dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:684
at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor &grad, const at::Tensor &self)
Definition: Functions.h:6721
at::Tensor & mkldnn_max_pool3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:21709
at::Tensor tanh_backward(const at::Tensor &grad_output, const at::Tensor &output)
Definition: Functions.h:16384
at::Tensor replication_pad2d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14279
inline ::std::tuple<::std::vector< at::Tensor >,::std::vector< at::Tensor >,::std::vector< at::Tensor >,::std::vector< at::Tensor >,::std::vector< at::Tensor > > _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale={}, const c10::optional< at::Tensor > &found_inf={})
Definition: Functions.h:26030
at::Tensor & conv_tbc_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const at::Tensor &bias, int64_t pad=0)
Definition: Functions.h:20409
void _foreach_clamp_min_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24461
void _foreach_sigmoid_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24885
at::Tensor & linalg_tensorsolve_outf(const at::Tensor &self, const at::Tensor &other, at::OptionalIntArrayRef dims, at::Tensor &out)
Definition: Functions.h:18477
inline ::std::vector< at::Tensor > _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights)
Definition: Functions.h:12168
at::Tensor & _masked_softmax_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mask, c10::optional< int64_t > dim=c10::nullopt, c10::optional< int64_t > mask_type=c10::nullopt)
Definition: Functions.h:24012
at::Tensor _sparse_mm(const at::Tensor &sparse, const at::Tensor &dense)
Definition: Functions.h:4833
at::Tensor & sigmoid_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6931
at::Tensor log_softmax(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:4204
at::Tensor linalg_matrix_norm(const at::Tensor &self, const at::Scalar &ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:18258
at::Tensor _conv_depthwise2d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16607
at::Tensor & sinc_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6992
inline ::std::vector< at::Tensor > _foreach_exp(at::TensorList self)
Definition: Functions.h:11818
at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:8231
at::Tensor nested_to_padded_tensor(const at::Tensor &self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt)
Definition: Functions.h:18580
at::Tensor logsumexp(const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false)
Definition: Functions.h:4293
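
A minimal usage sketch for at::logsumexp, which evaluates log(sum(exp(x))) stably as max(x) + log(sum(exp(x - max(x)))) (illustrative only; include, names, and sample values are assumptions):
#include <ATen/ATen.h>
// Hypothetical demo: naive exp(1000) overflows float, logsumexp does not.
void logsumexp_demo() {
  at::Tensor x = at::tensor(at::ArrayRef<float>{1000.f, 1000.f});
  at::Tensor l = at::logsumexp(x, /*dim=*/{0});  // 1000 + log(2) ~= 1000.6931
}
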
at::Tensor reflection_pad2d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13883
at::Tensor & _copy_from_and_resize_outf(const at::Tensor &self, const at::Tensor &dst, at::Tensor &out)
Definition: Functions.h:20440
at::Tensor multi_margin_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const at::Scalar &p, const at::Scalar &margin, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12310
at::Tensor _embedding_bag_dense_backward(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2565
at::Tensor _make_per_channel_quantized_tensor(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis)
Definition: Functions.h:9055
at::Tensor & native_norm_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &p=2)
Definition: Functions.h:23190
at::Tensor cholesky_inverse(const at::Tensor &self, bool upper=false)
Definition: Functions.h:10547
at::Tensor lt(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10196
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_qr_outf(const at::Tensor &A, c10::string_view mode, at::Tensor &Q, at::Tensor &R)
Definition: Functions.h:18491
at::Tensor _sparse_addmm(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:8580
at::Tensor & _test_optional_floatlist_outf(const at::Tensor &values, c10::optional< at::ArrayRef< double > > addends, at::Tensor &out)
Definition: Functions.h:25334
at::Tensor fft_ihfftn(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17721
inline ::std::tuple< at::Tensor, at::Tensor > min(const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:4639
at::Tensor masked_scatter(const at::Tensor &self, const at::Tensor &mask, const at::Tensor &source)
Definition: Functions.h:9419
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor &indices, const at::Tensor &values, at::TensorOptions options)
Definition: Functions.h:8904
at::Tensor & unfold_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dimension, int64_t size, int64_t step)
Definition: Functions.h:25882
at::Tensor conv_tbc(const at::Tensor &self, const at::Tensor &weight, const at::Tensor &bias, int64_t pad=0)
Definition: Functions.h:1847
at::Tensor argwhere(const at::Tensor &self)
Definition: Functions.h:10359
at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19734
at::Tensor & select_scatter_outf(const at::Tensor &self, const at::Tensor &src, int64_t dim, int64_t index, at::Tensor &out)
Definition: Functions.h:22684
inline ::std::vector< at::Tensor > _foreach_log10(at::TensorList self)
Definition: Functions.h:11963
at::Tensor & range_outf(const at::Scalar &start, const at::Scalar &end, at::Tensor &out)
Definition: Functions.h:6412
at::Tensor _cdist_backward(const at::Tensor &grad, const at::Tensor &x1, const at::Tensor &x2, double p, const at::Tensor &cdist)
Definition: Functions.h:5301
at::ScalarType promote_types(at::ScalarType type1, at::ScalarType type2)
Definition: Functions.h:9199
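A sketch of at::promote_types, which applies the standard type-promotion rules to two dtypes without touching any tensor data:
  at::ScalarType st = at::promote_types(at::kLong, at::kFloat);  // yields at::kFloat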
at::Tensor & max_unpool2d_outf(const at::Tensor &self, const at::Tensor &indices, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:13617
at::Tensor & arctanh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:886
at::Tensor & feature_dropout_(at::Tensor &self, double p, bool train)
Definition: Functions.h:328
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_backward_symint_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_out, const at::Tensor &input, c10::SymIntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21588
at::Tensor & _upsample_bilinear2d_aa_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15104
void _histogramdd_bin_edges_outf(const at::Tensor &self, at::IntArrayRef bins, c10::optional< at::ArrayRef< double > > range, const c10::optional< at::Tensor > &weight, bool density, at::TensorList out)
Definition: Functions.h:24299
at::Tensor & threshold_(at::Tensor &self, const at::Scalar &threshold, const at::Scalar &value)
Definition: Functions.h:7743
inline ::std::vector< at::Tensor > meshgrid(at::TensorList tensors)
Definition: Functions.h:9154
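A hedged sketch of at::meshgrid on two 1-D tensors (this overload uses "ij" indexing; shapes are illustrative):
  at::Tensor x = at::arange(3);  // shape [3]
  at::Tensor y = at::arange(2);  // shape [2]
  std::vector<at::Tensor> grids = at::meshgrid({x, y});  // grids[0] and grids[1] both have shape [3, 2]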
at::Tensor _ctc_loss_backward(const at::Tensor &grad, const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor &neg_log_likelihood, const at::Tensor &log_alpha, int64_t blank, bool zero_infinity=false)
Definition: Functions.h:2172
inline ::std::vector< at::Tensor > _foreach_tanh(at::TensorList self)
Definition: Functions.h:12013
inline ::std::tuple< at::Tensor, at::Tensor > _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, const at::Tensor &fake_quant_enabled, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:9075
at::Tensor special_xlogy(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17193
inline ::std::tuple< at::Tensor &, at::Tensor & > _native_multi_head_attention_outf(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask, bool need_weights, bool average_attn_weights, c10::optional< int64_t > mask_type, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:25957
inline ::std::vector< at::Tensor > split_symint(const at::Tensor &self, c10::SymInt split_size, int64_t dim=0)
Definition: Functions.h:7236
at::Tensor & silu_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::Tensor &grad_input)
Definition: Functions.h:6887
inline ::std::tuple< at::Tensor &, at::Tensor & > _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, const at::Tensor &fake_quant_enabled, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:23737
at::Tensor selu(const at::Tensor &self)
Definition: Functions.h:6844
at::Tensor & conv_depthwise3d_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:25184
at::Tensor & quantile_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &q, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear")
Definition: Functions.h:11125
at::Tensor gather(const at::Tensor &self, int64_t dim, const at::Tensor &index, bool sparse_grad=false)
Definition: Functions.h:10373
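A sketch of at::gather along dim 1, where out[i][j] = src[i][idx[i][j]] (values are illustrative):
  at::Tensor src = at::arange(12, at::kFloat).reshape({3, 4});
  at::Tensor idx = at::randint(/*low=*/0, /*high=*/4, {3, 2});  // kLong indices into dim 1
  at::Tensor out = at::gather(src, /*dim=*/1, idx);             // shape [3, 2]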
at::Tensor & narrow_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim, int64_t start, int64_t length)
Definition: Functions.h:4964
void split_copy_out(at::TensorList out, const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:18963
at::Tensor & nonzero_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:10340
at::Tensor fft_irfft(const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17525
void _cudnn_rnn_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:19995
inline ::std::tuple< at::Tensor, at::Tensor > linalg_slogdet(const at::Tensor &A)
Definition: Functions.h:18015
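A sketch of at::linalg_slogdet, which returns (sign, logabsdet) so that det(A) = sign * exp(logabsdet) is obtained without overflow:
  at::Tensor A = at::randn({3, 3});
  auto [sign, logabsdet] = at::linalg_slogdet(A);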
at::Tensor & binary_cross_entropy_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:1126
at::Tensor & orgqr_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &input2)
Definition: Functions.h:10594
at::Tensor & mse_loss_outf(const at::Tensor &self, const at::Tensor &target, int64_t reduction, at::Tensor &out)
Definition: Functions.h:12258
at::Tensor & _softmax_backward_data_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, at::ScalarType input_dtype)
Definition: Functions.h:7194
at::Tensor special_scaled_modified_bessel_k0(const at::Tensor &x)
Definition: Functions.h:19664
at::Tensor fft_rfft(const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17511
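A sketch of at::fft_rfft on a real signal; the result is complex with n/2 + 1 frequency bins (assumes <ATen/ATen.h>):
  at::Tensor x = at::randn({8});
  at::Tensor X = at::fft_rfft(x);  // complex dtype, shape [5]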
at::Tensor & special_scaled_modified_bessel_k0_outf(const at::Tensor &x, at::Tensor &out)
Definition: Functions.h:19673
at::Tensor & softmax_out(at::Tensor &out, const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7161
at::Tensor linalg_svdvals(const at::Tensor &A, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:18314
at::Tensor & argmin_outf(const at::Tensor &self, c10::optional< int64_t > dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:776
at::Tensor mkldnn_linear_backward_input(at::IntArrayRef input_size, const at::Tensor &grad_output, const at::Tensor &weight)
Definition: Functions.h:3938
at::Tensor & _test_autograd_multiple_dispatch_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25352
at::Tensor & col2im_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16849
void _foreach_lgamma_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24862
at::Tensor & masked_select_outf(const at::Tensor &self, const at::Tensor &mask, at::Tensor &out)
Definition: Functions.h:10325
at::Tensor & replication_pad3d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14389
at::Tensor & addcdiv_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &tensor1, const at::Tensor &tensor2, const at::Scalar &value=1)
Definition: Functions.h:10416
at::Tensor & acosh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:791
at::Tensor & stack_outf(at::TensorList tensors, int64_t dim, at::Tensor &out)
Definition: Functions.h:7386
at::Tensor & replication_pad3d_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14400
at::Tensor logspace(const at::Scalar &start, const at::Scalar &end, int64_t steps, double base=10.0, at::TensorOptions options={})
Definition: Functions.h:4186
at::Tensor & _fft_c2r_outf(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor &out)
Definition: Functions.h:3526
at::Tensor & col2im_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:16860
at::Tensor & lcm_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:3318
at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:5206
at::Tensor & _sparse_sparse_matmul_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:22011
at::Tensor & elu_outf(const at::Tensor &self, const at::Scalar &alpha, const at::Scalar &scale, const at::Scalar &input_scale, at::Tensor &out)
Definition: Functions.h:12863
at::Tensor & index_put_out(at::Tensor &out, const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate=false)
Definition: Functions.h:21490
at::Tensor slice_scatter_symint(const at::Tensor &self, const at::Tensor &src, int64_t dim=0, c10::optional< c10::SymInt > start=c10::nullopt, c10::optional< c10::SymInt > end=c10::nullopt, c10::SymInt step=1)
Definition: Functions.h:7091
inline ::std::vector< at::Tensor > _foreach_reciprocal(at::TensorList self)
Definition: Functions.h:12073
at::ScalarType result_type(const at::Tensor &tensor, const at::Tensor &other)
Definition: Functions.h:9174
at::Tensor searchsorted(const at::Tensor &sorted_sequence, const at::Tensor &self, bool out_int32=false, bool right=false, c10::optional< c10::string_view > side=c10::nullopt, const c10::optional< at::Tensor > &sorter={})
Definition: Functions.h:12207
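A hedged sketch of at::searchsorted, which returns insertion positions that keep sorted_sequence sorted (values illustrative):
  at::Tensor boundaries = at::tensor({1.0, 3.0, 5.0, 7.0});
  at::Tensor values = at::tensor({0.5, 3.0, 9.0});
  at::Tensor pos = at::searchsorted(boundaries, values);  // [0, 1, 4], dtype kLong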
at::Tensor & slice_backward_symint_out(at::Tensor &out, const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step)
Definition: Functions.h:22607
at::Tensor sparse_csc_tensor(const at::Tensor &ccol_indices, const at::Tensor &row_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options)
Definition: Functions.h:8655
at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16006
at::Tensor & reflection_pad2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13806
at::Tensor & _adaptive_avg_pool2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25095
at::Tensor _nested_from_padded_and_nested_example(const at::Tensor &padded, const at::Tensor &nt_example)
Definition: Functions.h:7866
at::Tensor & to_padded_tensor_symint_outf(const at::Tensor &self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25933
inline ::std::vector< at::Tensor > _foreach_addcmul(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value=1)
Definition: Functions.h:12138
at::Tensor lift(const at::Tensor &self)
Definition: Functions.h:9394
bool __dispatch_is_complex(const at::Tensor &self)
Definition: Functions.h:3737
bool is_distributed(const at::Tensor &self)
Definition: Functions.h:3727
at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:15764
at::Tensor nll_loss_nd_symint(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100)
Definition: Functions.h:12412
inline ::std::tuple< at::Tensor, at::Tensor > batch_norm_stats(const at::Tensor &input, double eps)
Definition: Functions.h:5094
at::Tensor & mean_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:4540
at::Tensor & asin_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:949
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _linalg_solve_ex_out(at::Tensor &result, at::Tensor &LU, at::Tensor &pivots, at::Tensor &info, const at::Tensor &A, const at::Tensor &B, bool left=true, bool check_errors=false)
Definition: Functions.h:18417
at::Tensor & cross_outf(const at::Tensor &self, const at::Tensor &other, c10::optional< int64_t > dim, at::Tensor &out)
Definition: Functions.h:9852
at::Tensor & special_log_ndtr_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17002
at::Tensor kl_div(const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean, bool log_target=false)
Definition: Functions.h:3782
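A sketch of at::kl_div; with the default log_target=false the input is expected to hold log-probabilities and the target probabilities:
  at::Tensor input = at::log_softmax(at::randn({4, 5}), /*dim=*/1);
  at::Tensor target = at::softmax(at::randn({4, 5}), /*dim=*/1);
  at::Tensor loss = at::kl_div(input, target);  // 0-dim tensor under the default mean reduction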
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > linalg_ldl_factor_ex(const at::Tensor &self, bool hermitian=false, bool check_errors=false)
Definition: Functions.h:17912
at::Tensor & _mps_convolution_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor &out)
Definition: Functions.h:21767
at::Tensor & addmm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:8609
at::Tensor & isinf_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25294
at::Tensor slow_conv_transpose2d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16433
at::Tensor mkldnn_max_pool2d_backward(const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4496
at::Tensor _spdiags(const at::Tensor &diagonals, const at::Tensor &offsets, at::IntArrayRef shape, c10::optional< at::Layout > layout=c10::nullopt)
Definition: Functions.h:8366
at::Tensor fft_irfft2(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17609
at::Tensor & fft_irfft2_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17618
at::Tensor & _fft_c2r_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size)
Definition: Functions.h:3522
inline ::std::tuple< at::Tensor, at::Tensor > _rowwise_prune(const at::Tensor &weight, const at::Tensor &mask, at::ScalarType compressed_indices_dtype)
Definition: Functions.h:2487
at::Tensor nll_loss_symint(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100)
Definition: Functions.h:12434
at::Tensor triplet_margin_loss(const at::Tensor &anchor, const at::Tensor &positive, const at::Tensor &negative, double margin=1.0, double p=2, double eps=1e-06, bool swap=false, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:7886
at::Tensor rad2deg(const at::Tensor &self)
Definition: Functions.h:5391
void split_with_sizes_copy_symint_out(at::TensorList out, const at::Tensor &self, c10::SymIntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:19029
at::Tensor linalg_vector_norm(const at::Tensor &self, const at::Scalar &ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:18244
at::Tensor special_ndtr(const at::Tensor &self)
Definition: Functions.h:17137
at::Tensor constant_pad_nd(const at::Tensor &self, at::IntArrayRef pad, const at::Scalar &value=0)
Definition: Functions.h:1687
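A sketch of at::constant_pad_nd; pad pairs apply from the last dimension backwards, as (left, right, top, bottom, ...):
  at::Tensor t = at::ones({2, 3});
  at::Tensor p = at::constant_pad_nd(t, /*pad=*/{1, 1, 0, 2}, /*value=*/0);  // shape [4, 5]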
at::Tensor quantize_per_tensor_dynamic(const at::Tensor &self, at::ScalarType dtype, bool reduce_range)
Definition: Functions.h:8985
at::Tensor & _conj_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25429
at::Tensor & upsample_nearest2d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15984
at::Tensor & exp_(at::Tensor &self)
Definition: Functions.h:2971
at::Tensor adaptive_avg_pool3d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13331
at::Tensor _adaptive_avg_pool3d_symint(const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:13364
at::Tensor & fft_ihfft_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17558
at::Tensor slow_conv_dilated3d_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:16816
at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19776
at::Tensor less(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10224
at::Tensor log1p(const at::Tensor &self)
Definition: Functions.h:4068
at::Tensor nll_loss2d_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12764
at::Tensor & _embedding_bag_dense_backward_symint_outf(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx, at::Tensor &out)
Definition: Functions.h:20822
inline ::std::tuple< at::Tensor, at::Tensor > _amp_update_scale(const at::Tensor &self, const at::Tensor &growth_tracker, const at::Tensor &found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval)
Definition: Functions.h:24416
at::Tensor & randn_like_outf(const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:22456
at::Tensor pad_sequence(at::TensorList sequences, bool batch_first=false, double padding_value=0.0)
Definition: Functions.h:18655
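A sketch of at::pad_sequence, which right-pads variable-length sequences along dim 0 to a common length (trailing dims must match):
  at::Tensor a = at::ones({2, 4});
  at::Tensor b = at::ones({3, 4});
  at::Tensor batch = at::pad_sequence({a, b}, /*batch_first=*/true);  // shape [2, 3, 4], padded with 0.0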
inline ::std::tuple< at::Tensor, at::Tensor > triangular_solve(const at::Tensor &self, const at::Tensor &A, bool upper=true, bool transpose=false, bool unitriangular=false)
Definition: Functions.h:10461
at::Tensor & full_symint_out(at::Tensor &out, c10::SymIntArrayRef size, const at::Scalar &fill_value)
Definition: Functions.h:3255
at::Tensor & remainder_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10984
at::Tensor trace_backward_symint(const at::Tensor &grad, c10::SymIntArrayRef sizes)
Definition: Functions.h:9924
at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16017
at::Tensor & _test_autograd_multiple_dispatch_view_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25361
at::Tensor as_strided(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:895
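A hedged sketch of at::as_strided building overlapping windows as a view; the chosen sizes and strides must stay within the underlying storage:
  at::Tensor base = at::arange(10, at::kFloat);
  at::Tensor windows = at::as_strided(base, /*size=*/{4, 4}, /*stride=*/{2, 1});  // row i views base[2*i : 2*i + 4]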
at::Tensor signbit(const at::Tensor &self)
Definition: Functions.h:10753
at::Tensor & fft_ifft2_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17590
at::Tensor & nansum_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:7519
at::Tensor & _convert_indices_from_coo_to_csr_outf(const at::Tensor &self, int64_t size, bool out_int32, at::Tensor &out)
Definition: Functions.h:12235
inline ::std::tuple< at::Tensor &, at::Tensor & > min_out(at::Tensor &min, at::Tensor &min_indices, const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:4644
at::Tensor log_sigmoid_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &buffer)
Definition: Functions.h:13090
at::Tensor special_airy_ai(const at::Tensor &x)
Definition: Functions.h:19192
at::Tensor & copy_sparse_to_sparse_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, bool non_blocking=false)
Definition: Functions.h:23525
at::Tensor & upsample_nearest2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15819
at::Tensor & lift_fresh_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23980
at::Tensor reflection_pad1d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13751
at::Tensor & special_i0_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17282
at::Tensor & native_dropout_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &mask, double scale)
Definition: Functions.h:20075
at::Tensor linalg_lu_solve(const at::Tensor &LU, const at::Tensor &pivots, const at::Tensor &B, bool left=true, bool adjoint=false)
Definition: Functions.h:17865
void _foreach_atan_(at::TensorList self)
Definition: Functions.h:11878
at::Tensor _dim_arange(const at::Tensor &like, int64_t dim)
Definition: Functions.h:748
at::Tensor fft_ifft2(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17581
inline ::std::vector< at::Tensor > unbind(const at::Tensor &self, int64_t dim=0)
Definition: Functions.h:8960
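A sketch of at::unbind, which slices a tensor into a vector of views along one dimension:
  at::Tensor t = at::randn({3, 4});
  std::vector<at::Tensor> rows = at::unbind(t, /*dim=*/0);  // 3 tensors, each of shape [4]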
at::Tensor & _make_dual_copy_out(at::Tensor &out, const at::Tensor &primal, const at::Tensor &tangent, int64_t level)
Definition: Functions.h:25402
at::Tensor & new_full_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, const at::Scalar &fill_value)
Definition: Functions.h:20939
at::Tensor & quantized_batch_norm_out(at::Tensor &out, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const at::Tensor &mean, const at::Tensor &var, double eps, double output_scale, int64_t output_zero_point)
Definition: Functions.h:20138
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _lstm_mps(const at::Tensor &input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:9209
at::Tensor & _nested_tensor_from_tensor_list_outf(at::TensorList list, c10::optional< at::ScalarType > dtype, c10::optional< at::Layout > layout, c10::optional< at::Device > device, c10::optional< bool > pin_memory, at::Tensor &out)
Definition: Functions.h:25388
inline ::std::tuple< at::Tensor, at::Tensor > _sparse_mm_reduce_impl(const at::Tensor &self, const at::Tensor &other, c10::string_view reduce)
Definition: Functions.h:8599
at::Tensor & _nested_view_from_buffer_copy_outf(const at::Tensor &self, const at::Tensor &nested_size, const at::Tensor &nested_strides, at::IntArrayRef offsets, at::Tensor &out)
Definition: Functions.h:22979
at::Tensor & diag_embed_outf(const at::Tensor &self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor &out)
Definition: Functions.h:20602
at::Tensor & slow_conv3d_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor &out)
Definition: Functions.h:16684
at::Tensor & quantile_outf(const at::Tensor &self, const at::Tensor &q, c10::optional< int64_t > dim, bool keepdim, c10::string_view interpolation, at::Tensor &out)
Definition: Functions.h:11129
at::Tensor _make_dual_copy(const at::Tensor &primal, const at::Tensor &tangent, int64_t level)
Definition: Functions.h:18680
at::Tensor slow_conv_transpose3d_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:16510
const at::Tensor & as_strided_(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:917
at::Tensor & _nested_view_from_buffer_copy_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &nested_size, const at::Tensor &nested_strides, at::IntArrayRef offsets)
Definition: Functions.h:22975
at::Tensor & roll_outf(const at::Tensor &self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor &out)
Definition: Functions.h:22907
at::Tensor & xlogy_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:4163
at::Tensor & hamming_window_out(at::Tensor &out, int64_t window_length)
Definition: Functions.h:21339
at::Tensor & _nested_tensor_strides_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:22957
inline ::std::tuple< at::Tensor, at::Tensor > _efficient_attention_forward(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const c10::optional< at::Tensor > &cu_seqlens_q, const c10::optional< at::Tensor > &cu_seqlens_k, c10::optional< int64_t > max_seqlen_q, bool compute_log_sumexp=false, bool causal=false)
Definition: Functions.h:19172
at::Tensor & special_hermite_polynomial_he_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19497
at::Tensor logical_or(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1243
at::Tensor softmax(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7156
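A sketch of at::softmax; the optional dtype argument upcasts before the reduction for extra precision:
  at::Tensor logits = at::randn({2, 5});
  at::Tensor probs = at::softmax(logits, /*dim=*/-1);         // each row sums to 1
  at::Tensor probs64 = at::softmax(logits, -1, at::kDouble);  // computed in double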
void _validate_compressed_sparse_indices(bool is_crow, const at::Tensor &compressed_idx, const at::Tensor &plain_idx, int64_t cdim, int64_t dim, int64_t nnz)
Definition: Functions.h:3597
at::Tensor & slow_conv_transpose2d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:16400
at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:13309
at::Tensor & special_digamma_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17058
at::Tensor & softshrink_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &lambd=0.5)
Definition: Functions.h:13147
at::Tensor & _nested_tensor_size_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:22948
at::Tensor & col2im_outf(const at::Tensor &self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:16838
at::Tensor & heaviside_outf(const at::Tensor &self, const at::Tensor &values, at::Tensor &out)
Definition: Functions.h:8565
at::Tensor & _coalesced_outf(const at::Tensor &self, bool coalesced, at::Tensor &out)
Definition: Functions.h:23515
at::Tensor triu(const at::Tensor &self, int64_t diagonal=0)
Definition: Functions.h:9871
at::Tensor conv_depthwise3d_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16640
at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor &compressed_indices, const at::Tensor &plain_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8727
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _efficient_attention_backward(const at::Tensor &grad_out_, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const at::Tensor &out, const at::Tensor &logsumexp, bool is_causal=false, bool chunk_grad_outputs=false)
Definition: Functions.h:19177
inline ::std::tuple< at::Tensor &, at::Tensor & > mode_outf(const at::Tensor &self, int64_t dim, bool keepdim, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:4857
at::Tensor special_i0(const at::Tensor &self)
Definition: Functions.h:17277
at::Tensor & linalg_pinv_out(at::Tensor &out, const at::Tensor &self, const c10::optional< at::Tensor > &atol={}, const c10::optional< at::Tensor > &rtol={}, bool hermitian=false)
Definition: Functions.h:18361
inline ::std::vector< at::Tensor > _foreach_zero(at::TensorList self)
Definition: Functions.h:24655
at::Tensor & _upsample_nearest_exact1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15599
at::Tensor & polar_outf(const at::Tensor &abs, const at::Tensor &angle, at::Tensor &out)
Definition: Functions.h:1682
at::Tensor embedding_symint(const at::Tensor &weight, const at::Tensor &indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false)
Definition: Functions.h:2417
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _linalg_slogdet_outf(const at::Tensor &A, at::Tensor &sign, at::Tensor &logabsdet, at::Tensor &LU, at::Tensor &pivots)
Definition: Functions.h:18010
const at::Tensor & as_strided__symint(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset=c10::nullopt)
Definition: Functions.h:928
at::Tensor & select_scatter_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, int64_t dim, c10::SymInt index)
Definition: Functions.h:22695
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor,::std::vector< at::Tensor > > _cudnn_rnn_backward_symint(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:238
at::Tensor index_select(const at::Tensor &self, int64_t dim, const at::Tensor &index)
Definition: Functions.h:10280
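A sketch of at::index_select gathering whole rows with a 1-D kLong index (values illustrative):
  at::Tensor t = at::arange(12, at::kFloat).reshape({3, 4});
  at::Tensor idx = at::tensor({2, 0}, at::kLong);
  at::Tensor picked = at::index_select(t, /*dim=*/0, idx);  // rows 2 and 0, shape [2, 4]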
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > unique_consecutive_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &self, bool return_inverse=false, bool return_counts=false, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:23011
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _cudnn_rnn(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:205
at::Tensor & new_full_outf(const at::Tensor &self, at::IntArrayRef size, const at::Scalar &fill_value, at::Tensor &out)
Definition: Functions.h:20950
at::Tensor & _amp_update_scale_out(at::Tensor &out, const at::Tensor &self, at::Tensor &growth_tracker, const at::Tensor &found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval)
Definition: Functions.h:24407
at::Tensor log(const at::Tensor &self)
Definition: Functions.h:4030
void _foreach_log10_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24781
at::Tensor linalg_det(const at::Tensor &A)
Definition: Functions.h:17893
at::Tensor _fw_primal_copy(const at::Tensor &self, int64_t level)
Definition: Functions.h:18675
at::Tensor & floor_divide_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3147
at::Tensor & _adaptive_avg_pool2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:25047
at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional)
Definition: Functions.h:183
at::Tensor pinverse(const at::Tensor &self, double rcond=1e-15)
Definition: Functions.h:5381
at::Tensor upsample_bicubic2d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14598
at::Tensor pad(const at::Tensor &self, at::IntArrayRef pad, c10::string_view mode="constant", c10::optional< double > value=c10::nullopt)
Definition: Functions.h:14477
at::Tensor & asinh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:829
void _foreach_sub_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24430
at::Tensor gather_backward(const at::Tensor &grad, const at::Tensor &self, int64_t dim, const at::Tensor &index, bool sparse_grad)
Definition: Functions.h:10378
at::Tensor & log_sigmoid_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &buffer, at::Tensor &grad_input)
Definition: Functions.h:13085
at::Tensor & rrelu_with_noise_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &noise, const at::Scalar &lower, const at::Scalar &upper, bool training, bool self_is_result, at::Tensor &out)
Definition: Functions.h:25033
at::Tensor & dstack_out(at::Tensor &out, at::TensorList tensors)
Definition: Functions.h:7438
at::Tensor resize_as_sparse(const at::Tensor &self, const at::Tensor &the_template)
Definition: Functions.h:23339
void _cudnn_rnn_backward_symint_outf(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3)
Definition: Functions.h:20028
at::Tensor & linalg_tensorsolve_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, at::OptionalIntArrayRef dims=c10::nullopt)
Definition: Functions.h:18473
void _foreach_norm_out(at::TensorList out, at::TensorList self, const at::Scalar &ord=2)
Definition: Functions.h:24957
at::Tensor _mps_convolution_transpose(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:1980
void _foreach_expm1_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24750
at::Tensor & _indices_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25761
at::Tensor _neg_view_copy(const at::Tensor &self)
Definition: Functions.h:18700
void _foreach_sub_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24434
at::Tensor _sparse_sparse_matmul(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4843
at::Tensor diff(const at::Tensor &self, int64_t n=1, int64_t dim=-1, const c10::optional< at::Tensor > &prepend={}, const c10::optional< at::Tensor > &append={})
Definition: Functions.h:2229
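A sketch of at::diff computing n-th order finite differences along the last dimension:
  at::Tensor t = at::tensor({1.0, 4.0, 9.0});
  at::Tensor d1 = at::diff(t);           // [3.0, 5.0]
  at::Tensor d2 = at::diff(t, /*n=*/2);  // [2.0]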
at::Tensor nanmean(const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:4559
at::Tensor & divide_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2321
at::Tensor & igammac_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10956
at::Tensor & uniform_out(at::Tensor &out, const at::Tensor &self, double from=0, double to=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24180
at::Tensor & expand_copy_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, bool implicit, at::Tensor &out)
Definition: Functions.h:25542
at::Tensor sin(const at::Tensor &self)
Definition: Functions.h:6959
at::Tensor & logit_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::optional< double > eps, at::Tensor &grad_input)
Definition: Functions.h:16365
at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19791
void _foreach_div_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24448
at::Tensor & heaviside_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &values)
Definition: Functions.h:8561
at::Tensor hardshrink(const at::Tensor &self, const at::Scalar &lambd=0.5)
Definition: Functions.h:6735
at::Tensor special_modified_bessel_i0(const at::Tensor &self)
Definition: Functions.h:19608
at::Tensor mkldnn_adaptive_avg_pool2d_backward(const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:13255
int64_t q_zero_point(const at::Tensor &self)
Definition: Functions.h:9025
at::Tensor quantized_max_pool1d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4511
at::Tensor nll_loss2d_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12753
at::Tensor & log_softmax_outf(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:4213
at::Tensor upsample_nearest3d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16303
at::Tensor & segment_reduce_outf(const at::Tensor &data, c10::string_view reduce, const c10::optional< at::Tensor > &lengths, const c10::optional< at::Tensor > &indices, const c10::optional< at::Tensor > &offsets, int64_t axis, bool unsafe, const c10::optional< at::Scalar > &initial, at::Tensor &out)
Definition: Functions.h:25370
at::Tensor & hardtanh_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &min_val, const at::Scalar &max_val, at::Tensor &grad_input)
Definition: Functions.h:12981
at::Tensor & batch_norm_backward_elemt_outf(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &weight, const at::Tensor &mean_dy, const at::Tensor &mean_dy_xmu, const at::Tensor &count, at::Tensor &out)
Definition: Functions.h:22083
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _convolution_double_backward(const c10::optional< at::Tensor > &ggI, const c10::optional< at::Tensor > &ggW, const c10::optional< at::Tensor > &ggb, const at::Tensor &gO, const at::Tensor &weight, const at::Tensor &self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:1795
at::Tensor & multilabel_margin_loss_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, const at::Tensor &is_target, at::Tensor &grad_input)
Definition: Functions.h:12347
at::Tensor & nan_to_num_(at::Tensor &self, c10::optional< double > nan=c10::nullopt, c10::optional< double > posinf=c10::nullopt, c10::optional< double > neginf=c10::nullopt)
Definition: Functions.h:3900
at::Tensor & max_pool2d_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:21682
void split_copy_outf(const at::Tensor &self, int64_t split_size, int64_t dim, at::TensorList out)
Definition: Functions.h:18974
at::Tensor & silu_(at::Tensor &self)
Definition: Functions.h:6869
at::Tensor & fft_ifft_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17502
at::Tensor & quantize_per_channel_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &scales, const at::Tensor &zero_points, int64_t axis, at::ScalarType dtype)
Definition: Functions.h:23656
at::Tensor & _upsample_bilinear2d_aa_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15060
at::Tensor & argmax_outf(const at::Tensor &self, c10::optional< int64_t > dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:762
at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional< at::DimnameList > names, at::TensorOptions options={})
Definition: Functions.h:5460
at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:2623
at::Tensor & zeros_symint_out(at::Tensor &out, c10::SymIntArrayRef size)
Definition: Functions.h:8220
at::Tensor & slow_conv3d_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0))
Definition: Functions.h:16673
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_batch_norm_out(at::Tensor &out, at::Tensor &save_mean, at::Tensor &save_invstd, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double momentum, double eps)
Definition: Functions.h:5057
at::Tensor & frac_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:3175
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _linalg_svd(const at::Tensor &A, bool full_matrices=false, bool compute_uv=true, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:18286
inline ::std::tuple< at::Tensor &, at::Tensor & > sort_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t dim=-1, bool descending=false)
Definition: Functions.h:11176
inline ::std::tuple< at::Tensor, at::Tensor > nll_loss_forward(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index)
Definition: Functions.h:12489
at::Tensor & mkldnn_max_pool3d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out)
Definition: Functions.h:21713
at::Tensor & zero_(at::Tensor &self)
Definition: Functions.h:8513
at::Tensor & bitwise_or_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:9648
inline ::std::tuple< at::Tensor &, at::Tensor & > multilabel_margin_loss_forward_outf(const at::Tensor &self, const at::Tensor &target, int64_t reduction, at::Tensor &output, at::Tensor &is_target)
Definition: Functions.h:12333
at::Tensor max_pool3d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4521
at::Tensor & arccos_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:512
at::Tensor & fft_fftn_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17656
at::Tensor & mkldnn_reorder_conv2d_weight_outf(const at::Tensor &self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor &out)
Definition: Functions.h:23606
at::Tensor cartesian_prod(at::TensorList tensors)
Definition: Functions.h:9164
at::Tensor & log1p_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:4078
at::Tensor take(const at::Tensor &self, const at::Tensor &index)
Definition: Functions.h:10252
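A sketch of at::take, which indexes the tensor as if it were flattened in row-major order:
  at::Tensor t = at::arange(12, at::kFloat).reshape({3, 4});
  at::Tensor flat_idx = at::tensor({0, 5, 11}, at::kLong);
  at::Tensor vals = at::take(t, flat_idx);  // [0.0, 5.0, 11.0]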
at::Tensor squeeze_copy(const at::Tensor &self)
Definition: Functions.h:18879
at::Tensor adaptive_avg_pool2d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13219
at::Tensor cumulative_trapezoid(const at::Tensor &y, const at::Tensor &x, int64_t dim=-1)
Definition: Functions.h:2142
at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:15742
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _linalg_svd_out(at::Tensor &U, at::Tensor &S, at::Tensor &Vh, const at::Tensor &A, bool full_matrices=false, bool compute_uv=true, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:18291
at::Tensor logaddexp2(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4129
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > lu_unpack_out(at::Tensor &P, at::Tensor &L, at::Tensor &U, const at::Tensor &LU_data, const at::Tensor &LU_pivots, bool unpack_data=true, bool unpack_pivots=true)
Definition: Functions.h:10641
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _thnn_fused_gru_cell_backward_outf(const at::Tensor &grad_hy, const at::Tensor &workspace, bool has_bias, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4)
Definition: Functions.h:23845
at::Tensor & special_zeta_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17250
at::Tensor _convolution_symint(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32)
Definition: Functions.h:1774
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > miopen_batch_norm_backward(const at::Tensor &input, const at::Tensor &grad_output, const at::Tensor &weight, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, const c10::optional< at::Tensor > &save_mean, const c10::optional< at::Tensor > &save_var, double epsilon)
Definition: Functions.h:4728
at::Tensor & _triton_scaled_dot_attention_out(at::Tensor &out, const at::Tensor &q, const at::Tensor &k, const at::Tensor &v, double dropout_p=0.0)
Definition: Functions.h:25962
at::Tensor special_chebyshev_polynomial_v(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19356
at::Tensor & upsample_nearest1d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15577
inline ::std::vector< at::Tensor > _foreach_tan(at::TensorList self)
Definition: Functions.h:12003
inline ::std::vector< at::Tensor > unsafe_split_with_sizes(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:7269
at::Tensor replication_pad3d_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14422
void _foreach_clamp_max_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24470
const at::Tensor & fft_hfftn_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional< c10::string_view > norm, const at::Tensor &out)
Definition: Functions.h:17716
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _scaled_dot_product_flash_attention_backward(const at::Tensor &grad_out, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const at::Tensor &out, const at::Tensor &logsumexp, const at::Tensor &cum_seq_q, const at::Tensor &cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset)
Definition: Functions.h:19142
inline ::std::tuple< at::Tensor, at::Tensor > aminmax(const at::Tensor &self, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false)
Definition: Functions.h:4379
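A sketch of at::aminmax, which returns (min, max) in one pass, either over the whole tensor or along one dim:
  at::Tensor t = at::randn({4, 5});
  auto [mn, mx] = at::aminmax(t);                 // both 0-dim (full reduction)
  auto [rmin, rmax] = at::aminmax(t, /*dim=*/1);  // both of shape [4]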
at::Tensor hardswish(const at::Tensor &self)
Definition: Functions.h:13005
void _foreach_atan_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24700
void _foreach_sin_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24831
at::Tensor _adaptive_avg_pool3d_backward(const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:13384
at::Tensor moveaxis(const at::Tensor &self, at::IntArrayRef source, at::IntArrayRef destination)
Definition: Functions.h:5341
at::Tensor & zeros_like_outf(const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:23131
at::Tensor & empty_strided_out(at::Tensor &out, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:21187
at::Tensor & linalg_tensorinv_out(at::Tensor &out, const at::Tensor &self, int64_t ind=2)
Definition: Functions.h:18459
void _fused_adamw_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale={}, const c10::optional< at::Tensor > &found_inf={})
Definition: Functions.h:19884
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor &grad, const at::Tensor &mask)
Definition: Functions.h:9080
at::Tensor __dispatch_conj(const at::Tensor &self)
Definition: Functions.h:439
at::Tensor & less_equal_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10103
at::Tensor & _pdist_backward_outf(const at::Tensor &grad, const at::Tensor &self, double p, const at::Tensor &pdist, at::Tensor &out)
Definition: Functions.h:22199
void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24466
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _thnn_fused_gru_cell_backward(const at::Tensor &grad_hy, const at::Tensor &workspace, bool has_bias)
Definition: Functions.h:9244
at::Tensor all(const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:642
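A sketch of the dim-reducing at::all overload on a boolean mask (values illustrative):
  at::Tensor m = at::tensor({1, 0, 1, 1}, at::kBool).reshape({2, 2});
  at::Tensor rows_all = at::all(m, /*dim=*/1);  // [false, true]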
at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15280
at::Tensor & arctan_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:1010
at::Tensor & set_symint_outf(const at::Tensor &self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor &out)
Definition: Functions.h:23906
at::Tensor & trunc_(at::Tensor &self)
Definition: Functions.h:7896
at::Tensor & hardtanh_outf(const at::Tensor &self, const at::Scalar &min_val, const at::Scalar &max_val, at::Tensor &out)
Definition: Functions.h:12967
at::Tensor index_copy(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source)
Definition: Functions.h:3645
at::Tensor special_expm1(const at::Tensor &self)
Definition: Functions.h:17011
at::Tensor & threshold_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &threshold, at::Tensor &grad_input)
Definition: Functions.h:7761
inline ::std::vector< at::Tensor > _foreach_mul(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11598
at::Tensor & _foobar_outf(const at::Tensor &self, bool arg1, bool arg2, bool arg3, at::Tensor &out)
Definition: Functions.h:26002
at::Tensor & dropout_(at::Tensor &self, double p, bool train)
Definition: Functions.h:318
at::Tensor hardsigmoid_backward(const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:12958
at::Tensor fliplr(const at::Tensor &self)
Definition: Functions.h:7806
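A sketch of at::fliplr, which reverses the column order of a tensor with at least two dimensions:
  at::Tensor t = at::arange(6).reshape({2, 3});
  at::Tensor f = at::fliplr(t);  // rows become [2, 1, 0] and [5, 4, 3]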
at::Tensor _embedding_bag_sparse_backward(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, const at::Tensor &bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2543
at::Tensor geometric(const at::Tensor &self, double p, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24245
at::Tensor & ones_like_outf(const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:22154
at::Tensor diagonal_scatter(const at::Tensor &self, const at::Tensor &src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1)
Definition: Functions.h:7124
at::Tensor special_log_softmax(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:17431
at::Tensor & vstack_out(at::Tensor &out, at::TensorList tensors)
Definition: Functions.h:7424
at::Tensor tanh(const at::Tensor &self)
Definition: Functions.h:7705
at::Tensor & mse_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction)
Definition: Functions.h:12268
at::Tensor & avg_pool2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional< int64_t > divisor_override=c10::nullopt)
Definition: Functions.h:13445
at::Tensor & reflection_pad3d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13938
inline ::std::tuple< at::Tensor &, at::Tensor & > batch_norm_stats_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &input, double eps)
Definition: Functions.h:22034
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor &indices, const at::Tensor &values, at::Tensor &out)
Definition: Functions.h:23423
at::Tensor & empty_quantized_out(at::Tensor &out, at::IntArrayRef size, const at::Tensor &qtensor, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21169
at::Tensor & _masked_softmax_outf(const at::Tensor &self, const at::Tensor &mask, c10::optional< int64_t > dim, c10::optional< int64_t > mask_type, at::Tensor &out)
Definition: Functions.h:24016
at::Tensor & softplus_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &beta, const at::Scalar &threshold, at::Tensor &grad_input)
Definition: Functions.h:13137
at::Tensor _pad_circular_symint(const at::Tensor &self, c10::SymIntArrayRef pad)
Definition: Functions.h:14444
inline ::std::tuple< at::Tensor &, at::Tensor & > cummin_outf(const at::Tensor &self, int64_t dim, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:2052
at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15621
at::Tensor & _adaptive_avg_pool3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25148
at::Tensor & gcd_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:3299
bool is_inference(const Tensor &tensor)
Definition: Functions.h:26074
at::Tensor & _add_relu_(at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:555
at::Tensor & tril_out(at::Tensor &out, const at::Tensor &self, int64_t diagonal=0)
Definition: Functions.h:9876
at::Tensor & _histogramdd_from_bin_tensors_outf(const at::Tensor &self, at::TensorList bins, const c10::optional< at::Tensor > &weight, bool density, at::Tensor &out)
Definition: Functions.h:24317
at::Tensor & sign_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10748
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _cudnn_rnn_symint(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:216
at::Tensor zeros(at::IntArrayRef size, c10::optional< at::DimnameList > names, at::TensorOptions options={})
Definition: Functions.h:8136
at::Tensor & relu_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:22523
at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14664
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:25038
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(at::Tensor &out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor &indices, const at::Tensor &values)
Definition: Functions.h:23412
at::Tensor & ger_outf(const at::Tensor &self, const at::Tensor &vec2, at::Tensor &out)
Definition: Functions.h:18211
at::Tensor & deg2rad_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:5424
inline ::std::tuple< at::Tensor, at::Tensor > frexp(const at::Tensor &self)
Definition: Functions.h:8437
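A minimal usage sketch for at::frexp; the input values below are illustrative assumptions, not taken from this header:

#include <ATen/ATen.h>  // or <ATen/ops/frexp.h> under per-operator headers

at::Tensor x = at::tensor({8.0, 0.5});
// Elementwise decomposition x == mantissa * 2^exponent, mirroring std::frexp.
auto [mantissa, exponent] = at::frexp(x);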
at::Tensor square(const at::Tensor &self)
Definition: Functions.h:7543
at::Tensor atanh(const at::Tensor &self)
Definition: Functions.h:857
at::Tensor _test_optional_intlist(const at::Tensor &values, at::OptionalIntArrayRef addends)
Definition: Functions.h:18590
at::Tensor mean(const at::Tensor &self, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:4526
at::Tensor & _sparse_broadcast_to_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:25491
inline ::std::tuple< at::Tensor, at::Tensor > choose_qparams_optimized(const at::Tensor &input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width)
Definition: Functions.h:9140
const at::Tensor & fft_hfftn_out(const at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17712
at::Tensor & _logcumsumexp_outf(const at::Tensor &self, int64_t dim, at::Tensor &out)
Definition: Functions.h:4260
at::Tensor positive(const at::Tensor &self)
Definition: Functions.h:8498
at::Tensor & index_select_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, at::Tensor &out)
Definition: Functions.h:10275
at::Tensor linalg_diagonal(const at::Tensor &A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1)
Definition: Functions.h:2197
at::Tensor & _upsample_bilinear2d_aa_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15093
inline ::std::tuple< at::Tensor &, at::Tensor & > qr_out(at::Tensor &Q, at::Tensor &R, const at::Tensor &self, bool some=true)
Definition: Functions.h:10561
at::Tensor & _fft_c2c_symint_outf(const at::Tensor &self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor &out)
Definition: Functions.h:3586
at::Tensor & fft_rfft_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17516
at::Tensor cumprod(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:2081
at::Tensor _sparse_softmax_backward_data(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, const at::Tensor &self)
Definition: Functions.h:8341
at::Tensor & sinh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:7007
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _unique2_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &self, bool sorted=true, bool return_inverse=false, bool return_counts=false)
Definition: Functions.h:23029
at::Tensor & reflection_pad1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13641
inline ::std::vector< at::Tensor > _foreach_maximum(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11638
at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15797
at::Tensor & ceil_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:1424
at::Tensor linalg_solve_triangular(const at::Tensor &self, const at::Tensor &B, bool upper, bool left=true, bool unitriangular=false)
Definition: Functions.h:10480
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > convolution_backward_overrideable(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:1758
at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={})
Definition: Functions.h:3422
at::Tensor & erfc_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:2957
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _thnn_fused_lstm_cell_backward_impl_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &cx, const at::Tensor &cy, const at::Tensor &workspace, bool has_bias)
Definition: Functions.h:23823
at::Tensor & index_add_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source, const at::Scalar &alpha=1)
Definition: Functions.h:9453
at::Tensor _remove_batch_dim(const at::Tensor &self, int64_t level, int64_t batch_size, int64_t out_dim)
Definition: Functions.h:16964
at::Tensor dropout(const at::Tensor &input, double p, bool train)
Definition: Functions.h:313
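A minimal usage sketch for the functional at::dropout (shapes and probability are illustrative assumptions):

#include <ATen/ATen.h>  // or <ATen/ops/dropout.h> under per-operator headers

at::Tensor x = at::randn({4, 8});
// train=true zeroes elements with probability p and rescales the rest by 1/(1-p).
at::Tensor y = at::dropout(x, /*p=*/0.5, /*train=*/true);
// train=false is the identity.
at::Tensor z = at::dropout(x, /*p=*/0.5, /*train=*/false);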
at::Tensor & neg_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6460
at::Tensor & _to_dense_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:23493
at::Tensor & _sample_dirichlet_outf(const at::Tensor &self, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:23167
at::Tensor linalg_matrix_exp(const at::Tensor &self)
Definition: Functions.h:17996
at::Tensor sparse_bsr_tensor(const at::Tensor &crow_indices, const at::Tensor &col_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options)
Definition: Functions.h:8664
at::Tensor index_select_backward_symint(const at::Tensor &grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor &index)
Definition: Functions.h:10310
at::Tensor & fft_fft2_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17572
at::Tensor _nested_view_from_buffer_copy(const at::Tensor &self, const at::Tensor &nested_size, const at::Tensor &nested_strides, at::IntArrayRef offsets)
Definition: Functions.h:7876
at::Tensor cross(const at::Tensor &self, const at::Tensor &other, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:9857
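A minimal usage sketch for at::cross (shapes are illustrative assumptions); passing dim explicitly avoids the legacy inference of the first size-3 dimension:

#include <ATen/ATen.h>  // or <ATen/ops/cross.h> under per-operator headers

at::Tensor a = at::randn({4, 3});
at::Tensor b = at::randn({4, 3});
// Batched 3-D cross product along dim 1 (the dimension of size 3).
at::Tensor c = at::cross(a, b, /*dim=*/1);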
at::Tensor & addcdiv_outf(const at::Tensor &self, const at::Tensor &tensor1, const at::Tensor &tensor2, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:10420
void _foreach_trunc_(at::TensorList self)
Definition: Functions.h:12098
at::Tensor _test_optional_floatlist(const at::Tensor &values, c10::optional< at::ArrayRef< double > > addends)
Definition: Functions.h:18600
at::Tensor & gelu_out(at::Tensor &out, const at::Tensor &self, c10::string_view approximate="none")
Definition: Functions.h:6688
inline ::std::tuple< at::Tensor, at::Tensor, double, int64_t > fbgemm_linear_quantize_weight(const at::Tensor &input)
Definition: Functions.h:3963
at::Tensor & _test_optional_filled_intlist_out(at::Tensor &out, const at::Tensor &values, at::OptionalIntArrayRef addends)
Definition: Functions.h:25321
at::Tensor & _efficientzerotensor_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:23122
at::Tensor & igamma_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10942
void split_with_sizes_copy_outf(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out)
Definition: Functions.h:19018
at::Tensor select_copy_symint(const at::Tensor &self, int64_t dim, c10::SymInt index)
Definition: Functions.h:18797
at::Tensor special_i0e(const at::Tensor &self)
Definition: Functions.h:17291
at::Tensor & quantize_per_tensor_dynamic_outf(const at::Tensor &self, at::ScalarType dtype, bool reduce_range, at::Tensor &out)
Definition: Functions.h:23624
at::Tensor & set_outf(const at::Tensor &self, at::Storage source, at::Tensor &out)
Definition: Functions.h:23863
at::Tensor detach(const at::Tensor &self)
Definition: Functions.h:7016
inline ::std::tuple< at::Tensor &, at::Tensor & > median_outf(const at::Tensor &self, int64_t dim, bool keepdim, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:4587
void _foreach_log_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24768
at::Tensor & upsample_nearest1d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:15566
at::Tensor & sinc_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6988
at::Tensor upsample_bilinear2d(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14521
at::Tensor mish(const at::Tensor &self)
Definition: Functions.h:6897
at::Tensor & _sparse_csr_sum_outf(const at::Tensor &self, at::IntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:23230
at::Tensor & slow_conv_transpose3d_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:16477
at::Tensor _make_per_tensor_quantized_tensor(const at::Tensor &self, double scale, int64_t zero_point)
Definition: Functions.h:9050
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > miopen_batch_norm_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input, const at::Tensor &grad_output, const at::Tensor &weight, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, const c10::optional< at::Tensor > &save_mean, const c10::optional< at::Tensor > &save_var, double epsilon)
Definition: Functions.h:21852
at::Tensor & view_copy_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:25851
at::Tensor & hardtanh_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &min_val=-1, const at::Scalar &max_val=1)
Definition: Functions.h:12963
at::Tensor & special_bessel_y1_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19263
void _foreach_reciprocal_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24880
at::Tensor prod(const at::Tensor &self, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7648
at::Tensor cos(const at::Tensor &self)
Definition: Functions.h:1887
at::Tensor & logical_or_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:1252
at::Tensor & alpha_dropout_(at::Tensor &self, double p, bool train)
Definition: Functions.h:338
at::Tensor upsample_nearest2d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16039
at::Tensor value_selecting_reduction_backward(const at::Tensor &grad, int64_t dim, const at::Tensor &indices, at::IntArrayRef sizes, bool keepdim)
Definition: Functions.h:4435
at::Tensor & soft_margin_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction)
Definition: Functions.h:12845
at::Tensor & scatter_reduce_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src, c10::string_view reduce, bool include_self=true)
Definition: Functions.h:9596
at::Tensor & _fft_r2c_outf(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor &out)
Definition: Functions.h:3512
at::Tensor & quantized_max_pool2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:21736
at::Tensor multilabel_margin_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, const at::Tensor &is_target)
Definition: Functions.h:12352
at::Tensor arange(const at::Scalar &end, at::TensorOptions options={})
Definition: Functions.h:703
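A minimal usage sketch for at::arange with TensorOptions (the end values are illustrative assumptions):

#include <ATen/ATen.h>  // or <ATen/ops/arange.h> under per-operator headers

// TensorOptions selects dtype/device; a bare ScalarType converts implicitly.
at::Tensor i = at::arange(5, at::kLong);  // 0, 1, 2, 3, 4 as int64
at::Tensor f = at::arange(5.0);           // default (floating) dtype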
at::Tensor & special_logit_out(at::Tensor &out, const at::Tensor &self, c10::optional< double > eps=c10::nullopt)
Definition: Functions.h:17338
void _foreach_addcmul_out(at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value=1)
Definition: Functions.h:24912
at::Tensor _fake_quantize_learnable_per_tensor_affine(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0)
Definition: Functions.h:9085
at::Tensor & _values_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25770
at::Tensor & geometric_outf(const at::Tensor &self, double p, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:24240
at::Tensor & mkldnn_adaptive_avg_pool2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25042
at::Tensor _pdist_backward(const at::Tensor &grad, const at::Tensor &self, double p, const at::Tensor &pdist)
Definition: Functions.h:5316
at::Tensor & grid_sampler_2d_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:21276
at::Tensor & new_empty_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:20851
at::Tensor & unfold_backward_symint_out(at::Tensor &out, const at::Tensor &grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step)
Definition: Functions.h:24362
at::Tensor & _euclidean_dist_out(at::Tensor &out, const at::Tensor &x1, const at::Tensor &x2)
Definition: Functions.h:22159
at::Tensor unfold_backward_symint(const at::Tensor &grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step)
Definition: Functions.h:11328
at::Tensor fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor &input, const at::Tensor &packed_weight, const at::Tensor &bias)
Definition: Functions.h:3973
at::Tensor embedding_dense_backward_symint(const at::Tensor &grad_output, const at::Tensor &indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq)
Definition: Functions.h:2461
at::Tensor & _test_optional_filled_intlist_outf(const at::Tensor &values, at::OptionalIntArrayRef addends, at::Tensor &out)
Definition: Functions.h:25325
at::Tensor scaled_dot_product_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const c10::optional< at::Tensor > &attn_mask={}, double dropout_p=0.0, bool is_causal=false)
Definition: Functions.h:19117
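A minimal usage sketch for at::scaled_dot_product_attention, following the (batch, heads, seq, head_dim) layout the fused attention kernels expect; the shapes are illustrative assumptions:

#include <ATen/ATen.h>  // or <ATen/ops/scaled_dot_product_attention.h>

at::Tensor q = at::randn({2, 4, 16, 64});
at::Tensor k = at::randn({2, 4, 16, 64});
at::Tensor v = at::randn({2, 4, 16, 64});
// Causal masking, no explicit attn_mask, no dropout.
at::Tensor out = at::scaled_dot_product_attention(
    q, k, v, /*attn_mask=*/{}, /*dropout_p=*/0.0, /*is_causal=*/true);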
at::Tensor as_strided_scatter(const at::Tensor &self, const at::Tensor &src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:7129
at::Tensor randperm(int64_t n, at::TensorOptions options=at::kLong)
Definition: Functions.h:6354
at::Tensor isinf(const at::Tensor &self)
Definition: Functions.h:16926
at::Tensor & multiply_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:4904
const at::Tensor & fft_hfft2_out(const at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17628
at::Tensor & kron_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3792
at::Tensor & _mps_convolution_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:21763
at::Tensor & huber_loss_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, double delta, at::Tensor &grad_input)
Definition: Functions.h:12821
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _linalg_solve_ex_outf(const at::Tensor &A, const at::Tensor &B, bool left, bool check_errors, at::Tensor &result, at::Tensor &LU, at::Tensor &pivots, at::Tensor &info)
Definition: Functions.h:18421
bool is_floating_point(const Tensor &tensor)
Definition: Functions.h:26066
void split_with_sizes_copy_symint_outf(const at::Tensor &self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out)
Definition: Functions.h:19040
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > conv_tbc_backward(const at::Tensor &self, const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, int64_t pad)
Definition: Functions.h:1852
void _validate_sparse_bsc_tensor_args(const at::Tensor &ccol_indices, const at::Tensor &row_indices, const at::Tensor &values, at::IntArrayRef size)
Definition: Functions.h:8868
at::Tensor lift_fresh(const at::Tensor &self)
Definition: Functions.h:9399
at::Tensor & nuclear_norm_outf(const at::Tensor &self, bool keepdim, at::Tensor &out)
Definition: Functions.h:8474
at::Tensor logit(const at::Tensor &self, c10::optional< double > eps=c10::nullopt)
Definition: Functions.h:6940
at::Tensor histc(const at::Tensor &self, int64_t bins=100, const at::Scalar &min=0, const at::Scalar &max=0)
Definition: Functions.h:10837
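A minimal usage sketch for at::histc (the data and bin count are illustrative assumptions):

#include <ATen/ATen.h>  // or <ATen/ops/histc.h> under per-operator headers

at::Tensor data = at::randn({1000});
// With min == max == 0 (the defaults), the range is taken from the data itself.
at::Tensor hist = at::histc(data, /*bins=*/10);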
at::Tensor & _sparse_sum_outf(const at::Tensor &self, at::IntArrayRef dim, at::Tensor &out)
Definition: Functions.h:23212
at::Tensor & _coalesce_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23506
inline ::std::tuple< at::Tensor &, at::Tensor & > _fused_dropout_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, double p, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:20048
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_qr_out(at::Tensor &Q, at::Tensor &R, const at::Tensor &A, c10::string_view mode="reduced")
Definition: Functions.h:18487
at::Tensor & rad2deg_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:5405
at::Tensor view_as_complex_copy(const at::Tensor &self)
Definition: Functions.h:18690
at::Tensor _reshape_from_tensor(const at::Tensor &self, const at::Tensor &shape)
Definition: Functions.h:303
at::Tensor one_hot(const at::Tensor &self, int64_t num_classes=-1)
Definition: Functions.h:7796
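A minimal usage sketch for at::one_hot; the index values are illustrative assumptions:

#include <ATen/ATen.h>  // or <ATen/ops/one_hot.h> under per-operator headers

at::Tensor idx = at::tensor({0, 2, 1}, at::kLong);
// Yields a (3, 4) tensor of zeros and ones; num_classes=-1 would instead
// infer the class count from the largest index present.
at::Tensor oh = at::one_hot(idx, /*num_classes=*/4);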
at::Tensor upsample_nearest1d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15786
at::Tensor & masked_select_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mask)
Definition: Functions.h:10321
at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:13186
at::Tensor feature_alpha_dropout(const at::Tensor &input, double p, bool train)
Definition: Functions.h:343
at::Tensor & eq_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:9995
at::Tensor special_psi(const at::Tensor &self)
Definition: Functions.h:17039
at::Tensor & batch_norm_elemt_out(at::Tensor &out, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const at::Tensor &mean, const at::Tensor &invstd, double eps)
Definition: Functions.h:5104
bool is_signed(const Tensor &tensor)
Definition: Functions.h:26070
at::Tensor imag(const at::Tensor &self)
Definition: Functions.h:429
void _foreach_cosh_(at::TensorList self)
Definition: Functions.h:11908
at::Tensor special_modified_bessel_i1(const at::Tensor &self)
Definition: Functions.h:19622
at::Tensor smooth_l1_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, double beta)
Definition: Functions.h:12798
at::Tensor & special_xlog1py_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:17170
at::Tensor & _sparse_coo_tensor_with_dims_outf(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:23407
at::Tensor select_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index)
Definition: Functions.h:6811
at::Tensor & quantize_per_tensor_dynamic_out(at::Tensor &out, const at::Tensor &self, at::ScalarType dtype, bool reduce_range)
Definition: Functions.h:23620
at::Tensor & minimum_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:11101
at::Tensor & exp_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:2976
at::Tensor & atan_(at::Tensor &self)
Definition: Functions.h:982
inline ::std::vector< at::Tensor > unsafe_split_with_sizes_symint(const at::Tensor &self, c10::SymIntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:7280
at::Tensor __rshift__(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9777
at::Tensor & bitwise_xor_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:9695
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > _native_decoder_only_multi_head_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask={}, const c10::optional< at::Tensor > &incr_key={}, const c10::optional< at::Tensor > &incr_value={}, bool need_weights=true, bool average_attn_weights=true)
Definition: Functions.h:19211
at::Tensor & _fft_c2c_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool forward)
Definition: Functions.h:3553
at::Tensor conv_transpose3d(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1)
Definition: Functions.h:1867
at::Tensor & replication_pad3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14378
at::Tensor & select_backward_outf(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor &out)
Definition: Functions.h:22543
at::Tensor & _index_put_impl_(at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate=false, bool unsafe=false)
Definition: Functions.h:3665
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _thnn_fused_lstm_cell_backward(const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &cx, const at::Tensor &cy, const at::Tensor &workspace, bool has_bias)
Definition: Functions.h:9229
inline ::std::tuple< at::Tensor, at::Tensor > geqrf(const at::Tensor &self)
Definition: Functions.h:10584
at::Tensor & _trilinear_outf(const at::Tensor &i1, const at::Tensor &i2, const at::Tensor &i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor &out)
Definition: Functions.h:22988
at::Tensor & arccosh_(at::Tensor &self)
Definition: Functions.h:805
at::Tensor _sparse_sum(const at::Tensor &self)
Definition: Functions.h:8291
at::Tensor special_softmax(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:17478
int64_t __dispatch_size(const at::Tensor &self, int64_t dim)
Definition: Functions.h:7026
inline ::std::tuple< at::Tensor, at::Tensor > slogdet(const at::Tensor &self)
Definition: Functions.h:18029
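A minimal usage sketch for at::slogdet (the matrix is an illustrative assumption); the (sign, logabsdet) pair is numerically safer than computing log(det(m)) directly:

#include <ATen/ATen.h>  // or <ATen/ops/slogdet.h> under per-operator headers

at::Tensor m = at::randn({3, 3});
auto [sign, logabsdet] = at::slogdet(m);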
at::Tensor & pixel_shuffle_outf(const at::Tensor &self, int64_t upscale_factor, at::Tensor &out)
Definition: Functions.h:22208
inline ::std::tuple< at::Tensor, at::Tensor > max_pool2d_with_indices(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:13566
void _foreach_sub_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11593
at::Tensor & soft_margin_loss_outf(const at::Tensor &self, const at::Tensor &target, int64_t reduction, at::Tensor &out)
Definition: Functions.h:12835
inline ::std::tuple< at::Tensor, at::Tensor > linalg_lu_factor(const at::Tensor &A, bool pivot=true)
Definition: Functions.h:17823
at::Tensor & fft_irfftn_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17702
inline ::std::tuple< at::Tensor &, at::Tensor & > mps_convolution_transpose_backward_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array< bool, 2 > output_mask)
Definition: Functions.h:20526
at::Tensor select_scatter(const at::Tensor &self, const at::Tensor &src, int64_t dim, int64_t index)
Definition: Functions.h:7102
at::Tensor & relu_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:22527
inline ::std::tuple< at::Tensor, at::Tensor > rnn_tanh(const at::Tensor &input, const at::Tensor &hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:9274
at::Tensor & _convolution_symint_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32)
Definition: Functions.h:20387
void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size)
Definition: Functions.h:3612
at::Tensor & take_outf(const at::Tensor &self, const at::Tensor &index, at::Tensor &out)
Definition: Functions.h:10247
at::Tensor & _cdist_backward_outf(const at::Tensor &grad, const at::Tensor &x1, const at::Tensor &x2, double p, const at::Tensor &cdist, at::Tensor &out)
Definition: Functions.h:22181
at::Tensor & to_sparse_bsc_outf(const at::Tensor &self, at::IntArrayRef blocksize, c10::optional< int64_t > dense_dim, at::Tensor &out)
Definition: Functions.h:23588
at::Tensor & sinc_(at::Tensor &self)
Definition: Functions.h:6983
void _cudnn_rnn_backward_outf(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3)
Definition: Functions.h:20006
at::Tensor & prod_out(at::Tensor &out, const at::Tensor &self, int64_t dim, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7658
inline ::std::tuple< at::Tensor &, at::Tensor & > batch_norm_gather_stats_outf(const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum, double eps, int64_t count, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:22047
at::Tensor & column_stack_outf(at::TensorList tensors, at::Tensor &out)
Definition: Functions.h:16902
void _fused_adamw_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale, const c10::optional< at::Tensor > &found_inf, at::TensorList out)
Definition: Functions.h:26025
inline ::std::tuple< at::Tensor &, at::Tensor & > _fused_dropout_outf(const at::Tensor &self, double p, c10::optional< at::Generator > generator, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:20052
at::Tensor _shape_as_tensor(const at::Tensor &self)
Definition: Functions.h:308
at::Tensor & _fake_quantize_learnable_per_channel_affine_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0)
Definition: Functions.h:23764
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linear_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21610
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _native_batch_norm_legit_functional(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const at::Tensor &running_mean, const at::Tensor &running_var, bool training, double momentum, double eps)
Definition: Functions.h:22029
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _cudnn_rnn_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:19951
at::Tensor & binary_cross_entropy_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, at::Tensor &grad_input)
Definition: Functions.h:1144
at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong)
Definition: Functions.h:9899
inline ::std::tuple< at::Tensor &, at::Tensor & > _pack_padded_sequence_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &input, const at::Tensor &lengths, bool batch_first)
Definition: Functions.h:23850
at::Tensor special_hermite_polynomial_h(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19440
at::Tensor & _spdiags_outf(const at::Tensor &diagonals, const at::Tensor &offsets, at::IntArrayRef shape, c10::optional< at::Layout > layout, at::Tensor &out)
Definition: Functions.h:23284
at::Tensor _nnpack_spatial_convolution(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride=1)
Definition: Functions.h:5153
inline ::std::tuple< at::Tensor &, at::Tensor & > max_pool3d_with_indices_out(at::Tensor &out, at::Tensor &indices, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:13585
at::Tensor & as_strided_scatter_symint_outf(const at::Tensor &self, const at::Tensor &src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset, at::Tensor &out)
Definition: Functions.h:22759
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _thnn_differentiable_gru_cell_backward(const at::Tensor &grad_hy, const at::Tensor &input_gates, const at::Tensor &hidden_gates, const at::Tensor &hx, const c10::optional< at::Tensor > &input_bias, const c10::optional< at::Tensor > &hidden_bias)
Definition: Functions.h:9249
void unsafe_split_with_sizes_symint_outf(const at::Tensor &self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out)
Definition: Functions.h:22847
at::Tensor & int_repr_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23701
inline ::std::vector< at::Tensor > split_with_sizes(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:7291
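A minimal usage sketch for at::split_with_sizes; the sizes are illustrative assumptions and must sum to the extent of dim:

#include <ATen/ATen.h>  // or <ATen/ops/split_with_sizes.h>
#include <vector>

at::Tensor t = at::arange(10);
// Views of length 3, 3 and 4 along dim 0 (no copy is made).
std::vector<at::Tensor> parts = at::split_with_sizes(t, {3, 3, 4}, /*dim=*/0);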
at::Tensor & special_log1p_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17426
at::Tensor logdet(const at::Tensor &self)
Definition: Functions.h:18043
at::Tensor & less_equal_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10107
at::Tensor transpose(const at::Tensor &self, int64_t dim0, int64_t dim1)
Definition: Functions.h:7776
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > mkldnn_rnn_layer_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, const at::Tensor &input, const at::Tensor &weight0, const at::Tensor &weight1, const at::Tensor &weight2, const at::Tensor &weight3, const at::Tensor &hx_, const at::Tensor &cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train)
Definition: Functions.h:21825
at::Tensor & relu_(at::Tensor &self)
Definition: Functions.h:6658
at::Tensor & mv_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &vec)
Definition: Functions.h:4919
at::Tensor & fft_rfftn_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17684
at::Tensor slow_conv_dilated2d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16783
at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23275
at::Tensor _sparse_softmax(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:8326
at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:14972
void split_with_sizes_copy_out(at::TensorList out, const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:19007
at::Tensor & upsample_nearest2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15852
at::Tensor & hardsigmoid_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:12930
at::Tensor & nll_loss2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight, at::Tensor &grad_input)
Definition: Functions.h:12720
at::Tensor special_log1p(const at::Tensor &self)
Definition: Functions.h:17417
inline ::std::tuple< at::Tensor &, at::Tensor & > max_pool2d_with_indices_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out, at::Tensor &indices)
Definition: Functions.h:13561
at::Tensor bucketize(const at::Tensor &self, const at::Tensor &boundaries, bool out_int32=false, bool right=false)
Definition: Functions.h:12188
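A minimal usage sketch for at::bucketize; the values are illustrative assumptions. boundaries must be sorted, and with right=false a value equal to a boundary falls in the bucket whose upper edge is that boundary:

#include <ATen/ATen.h>  // or <ATen/ops/bucketize.h> under per-operator headers

at::Tensor boundaries = at::tensor({1.0, 3.0, 5.0});
at::Tensor values = at::tensor({0.5, 3.0, 6.0});
// Expected buckets: 0, 1, 3 (values past the last boundary get index N).
at::Tensor buckets = at::bucketize(values, boundaries);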
at::Tensor & diff_outf(const at::Tensor &self, int64_t n, int64_t dim, const c10::optional< at::Tensor > &prepend, const c10::optional< at::Tensor > &append, at::Tensor &out)
Definition: Functions.h:2238
void _foreach_log1p_(at::TensorList self)
Definition: Functions.h:11978
at::Tensor & linalg_vector_norm_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:18249
at::Tensor det(const at::Tensor &self)
Definition: Functions.h:17907
at::Tensor & special_ndtr_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17146
at::Tensor & nll_loss2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12709
at::Tensor linalg_matrix_rank(const at::Tensor &input, const c10::optional< at::Tensor > &atol={}, const c10::optional< at::Tensor > &rtol={}, bool hermitian=false)
Definition: Functions.h:18510
at::Tensor & div_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2283
inline ::std::vector< at::Tensor > nonzero_numpy(const at::Tensor &self)
Definition: Functions.h:10354
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > linalg_lstsq_out(at::Tensor &solution, at::Tensor &residuals, at::Tensor &rank, at::Tensor &singular_values, const at::Tensor &self, const at::Tensor &b, c10::optional< double > rcond=c10::nullopt, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:17959
at::Tensor smooth_l1_loss(const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean, double beta=1.0)
Definition: Functions.h:12784
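A minimal usage sketch for at::smooth_l1_loss (the inputs are illustrative assumptions); the loss is quadratic for residuals below beta and linear above it:

#include <ATen/ATen.h>  // or <ATen/ops/smooth_l1_loss.h>

at::Tensor input = at::randn({8});
at::Tensor target = at::randn({8});
at::Tensor loss =
    at::smooth_l1_loss(input, target, at::Reduction::Mean, /*beta=*/1.0);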
at::Tensor _cast_Int(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:103
at::Tensor _reshape_alias_copy_symint(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride)
Definition: Functions.h:18775
at::Tensor rrelu_with_noise(const at::Tensor &self, const at::Tensor &noise, const at::Scalar &lower=0.125, const at::Scalar &upper=0.3333333333333333, bool training=false, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:13104
at::Tensor _convert_indices_from_coo_to_csr(const at::Tensor &self, int64_t size, bool out_int32=false)
Definition: Functions.h:12226
at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional< at::ScalarType > dtype=c10::nullopt, c10::optional< at::Layout > layout=c10::nullopt, c10::optional< at::Device > device=c10::nullopt, c10::optional< bool > pin_memory=c10::nullopt)
Definition: Functions.h:18670
at::Tensor & linalg_eigvals_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:18071
at::Tensor & cauchy_out(at::Tensor &out, const at::Tensor &self, double median=0, double sigma=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24194
at::Tensor argmin(const at::Tensor &self, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false)
Definition: Functions.h:767
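A minimal usage sketch for at::argmin (the shape is an illustrative assumption):

#include <ATen/ATen.h>  // or <ATen/ops/argmin.h> under per-operator headers

at::Tensor scores = at::randn({4, 10});
// Index of the minimum along dim 1; keepdim=false drops the reduced dim.
at::Tensor best = at::argmin(scores, /*dim=*/1);
// With no dim, the reduction runs over the flattened tensor.
at::Tensor flat = at::argmin(scores);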
at::Tensor nll_loss(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12423
at::Tensor & replication_pad3d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14334
at::Tensor & special_entr_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:16974
at::Tensor & upsample_bilinear2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:14928
inline ::std::tuple< at::Tensor, at::Tensor > mode(const at::Tensor &self, int64_t dim=-1, bool keepdim=false)
Definition: Functions.h:4848
at::Tensor & fractional_max_pool3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &indices)
Definition: Functions.h:13543
inline ::std::vector< at::Tensor > _foreach_cos(at::TensorList self)
Definition: Functions.h:11893
at::Tensor & slice_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:25659
int64_t stride(const at::Tensor &self, at::Dimname dim)
Definition: Functions.h:7467
at::Tensor _gather_sparse_backward(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &grad)
Definition: Functions.h:10397
at::Tensor & _logcumsumexp_out(at::Tensor &out, const at::Tensor &self, int64_t dim)
Definition: Functions.h:4256
const at::Tensor & _resize_output_(const at::Tensor &self, at::IntArrayRef size, at::Device device)
Definition: Functions.h:2817
at::Tensor argmax(const at::Tensor &self, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false)
Definition: Functions.h:753
at::Tensor index_add(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source, const at::Scalar &alpha=1)
Definition: Functions.h:9462
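A minimal usage sketch for the out-of-place at::index_add (the tensors are illustrative assumptions):

#include <ATen/ATen.h>  // or <ATen/ops/index_add.h> under per-operator headers

at::Tensor base = at::zeros({5, 3});
at::Tensor index = at::tensor({0, 4, 2}, at::kLong);
at::Tensor src = at::ones({3, 3});
// Adds alpha * src[i] into base[index[i]] along dim 0; base itself is untouched.
at::Tensor result = at::index_add(base, /*dim=*/0, index, src, /*alpha=*/2);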
at::Tensor & upsample_nearest3d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16248
at::Tensor & outer_outf(const at::Tensor &self, const at::Tensor &vec2, at::Tensor &out)
Definition: Functions.h:18197
at::Tensor special_expit(const at::Tensor &self)
Definition: Functions.h:17375
bool _has_compatible_shallow_copy_type(const at::Tensor &self, const at::Tensor &from)
Definition: Functions.h:7929
at::Tensor floor(const at::Tensor &self)
Definition: Functions.h:3123
at::Tensor & row_indices_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25820
at::Tensor & logsumexp_outf(const at::Tensor &self, at::IntArrayRef dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:4302
void _validate_sparse_coo_tensor_args(const at::Tensor &indices, const at::Tensor &values, at::IntArrayRef size)
Definition: Functions.h:8843
at::Tensor & bitwise_xor_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:9691
at::Tensor _histogramdd_from_bin_tensors(const at::Tensor &self, at::TensorList bins, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:10880
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > miopen_batch_norm_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double exponential_average_factor, double epsilon)
Definition: Functions.h:21843
at::Tensor as_strided_copy(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:18705
at::Tensor & clamp_(at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max=c10::nullopt)
Definition: Functions.h:1512
at::Tensor & cholesky_outf(const at::Tensor &self, bool upper, at::Tensor &out)
Definition: Functions.h:10518
at::Tensor diagonal_copy(const at::Tensor &self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1)
Definition: Functions.h:18732
at::Tensor & lift_fresh_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23976
inline ::std::vector< at::Tensor > unbind_copy(const at::Tensor &self, int64_t dim=0)
Definition: Functions.h:18949
at::Tensor & rrelu_with_noise_(at::Tensor &self, const at::Tensor &noise, const at::Scalar &lower=0.125, const at::Scalar &upper=0.3333333333333333, bool training=false, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:13114
at::Tensor & replication_pad2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14235
inline ::std::tuple< at::Tensor, at::Tensor > _ctc_loss(const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false)
Definition: Functions.h:2162
at::Tensor special_gammainc(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17445
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > mkldnn_rnn_layer_backward_outf(const at::Tensor &input, const at::Tensor &weight1, const at::Tensor &weight2, const at::Tensor &weight3, const at::Tensor &weight4, const at::Tensor &hx_, const at::Tensor &cx_tmp, const at::Tensor &output, const at::Tensor &hy_, const at::Tensor &cy_, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor &workspace, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, at::Tensor &out5, at::Tensor &out6)
Definition: Functions.h:21838
at::Tensor & _ctc_loss_backward_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor &neg_log_likelihood, const at::Tensor &log_alpha, int64_t blank, bool zero_infinity=false)
Definition: Functions.h:20589
at::Tensor & _make_per_channel_quantized_tensor_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis)
Definition: Functions.h:23719
void _foreach_trunc_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24894
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_ldl_factor_outf(const at::Tensor &self, bool hermitian, at::Tensor &LD, at::Tensor &pivots)
Definition: Functions.h:17935
at::Tensor & to_mkldnn_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:23593
at::Tensor & convolution_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups)
Definition: Functions.h:20259
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > svd(const at::Tensor &self, bool some=true, bool compute_uv=true)
Definition: Functions.h:10499
at::Tensor & clamp_min_outf(const at::Tensor &self, const at::Scalar &min, at::Tensor &out)
Definition: Functions.h:1602
inline ::std::tuple< at::Tensor &, at::Tensor & > _ctc_loss_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false)
Definition: Functions.h:20571
at::Tensor & squeeze_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25703
at::Tensor _adaptive_avg_pool2d_symint(const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:13271
inline ::std::tuple< at::Tensor, at::Tensor > fractional_max_pool2d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &random_samples)
Definition: Functions.h:13510
at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options={})
Definition: Functions.h:1059
at::Tensor & nan_to_num_outf(const at::Tensor &self, c10::optional< double > nan, c10::optional< double > posinf, c10::optional< double > neginf, at::Tensor &out)
Definition: Functions.h:3909
void _foreach_acos_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24682
at::Tensor _adaptive_avg_pool2d_backward(const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:13282
at::Tensor & mse_loss_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, at::Tensor &grad_input)
Definition: Functions.h:12272
at::Tensor adaptive_max_pool3d_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &indices)
Definition: Functions.h:13440
at::Tensor & _nested_tensor_from_mask_out(at::Tensor &out, const at::Tensor &t, const at::Tensor &mask, bool mask_check=true)
Definition: Functions.h:22930
at::Tensor _dirichlet_grad(const at::Tensor &x, const at::Tensor &alpha, const at::Tensor &total)
Definition: Functions.h:8261
at::Tensor & div_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:2287
at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15368
void lstm_mps_backward_outf(const at::Tensor &grad_y, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &z_state, const at::Tensor &cell_state_fwd, const at::Tensor &input, const at::Tensor &layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor &out0, at::TensorList out1, at::TensorList out2)
Definition: Functions.h:23809
at::Tensor & matmul_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:4340
at::Tensor fft_ihfft2(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17637
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_symint_outf(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21435
at::Tensor & linalg_householder_product_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &tau)
Definition: Functions.h:18123
at::Tensor reflection_pad2d_symint(const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13828
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _transform_bias_rescale_qkv_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &qkv, const at::Tensor &qkv_bias, int64_t num_heads)
Definition: Functions.h:22921
at::Tensor miopen_convolution_transpose(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4755
at::Tensor & upsample_nearest3d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16105
at::Tensor igammac(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10965
at::Tensor & special_bessel_j0_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19225
at::Tensor & ones_symint_outf(c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:5261
bool is_neg(const at::Tensor &tensor)
Definition: Functions.h:26090
at::Tensor gelu_backward(const at::Tensor &grad_output, const at::Tensor &self, c10::string_view approximate="none")
Definition: Functions.h:6716
at::Tensor & nanmean_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:4564
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_cholesky_ex_outf(const at::Tensor &self, bool upper, bool check_errors, at::Tensor &L, at::Tensor &info)
Definition: Functions.h:17790
at::Tensor & special_xlogy_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17208
at::Tensor & linalg_multi_dot_outf(at::TensorList tensors, at::Tensor &out)
Definition: Functions.h:18575
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _lstm_mps_outf(const at::Tensor &input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, at::Tensor &out5)
Definition: Functions.h:23800
at::Tensor clamp_min(const at::Tensor &self, const at::Scalar &min)
Definition: Functions.h:1578
at::Tensor & replication_pad3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14301
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > mkldnn_rnn_layer(const at::Tensor &input, const at::Tensor &weight0, const at::Tensor &weight1, const at::Tensor &weight2, const at::Tensor &weight3, const at::Tensor &hx_, const at::Tensor &cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train)
Definition: Functions.h:4713
at::Tensor & rad2deg_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:5401
at::Tensor & sub_outf(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:8522
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > convolution_backward_symint(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:1742
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _native_batch_norm_legit_out(at::Tensor &out, at::Tensor &save_mean, at::Tensor &save_invstd, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, at::Tensor &running_mean, at::Tensor &running_var, bool training, double momentum, double eps)
Definition: Functions.h:5071
at::Tensor & reflection_pad2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13795
at::Tensor & upsample_trilinear3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15489
at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor &indices, const at::Tensor &values, c10::SymIntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8821
at::Tensor & std_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false)
Definition: Functions.h:7602
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, int64_t > _batch_norm_impl_index(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double momentum, double eps, bool cudnn_enabled)
Definition: Functions.h:1087
at::Tensor & ceil_(at::Tensor &self)
Definition: Functions.h:1415
at::Tensor batch_norm(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double momentum, double eps, bool cudnn_enabled)
Definition: Functions.h:1077
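A minimal inference-mode sketch for the composite entry above; shapes and statistics are illustrative, and cudnn_enabled is forced off so the sketch does not assume a CUDA build:

#include <ATen/ATen.h>

void batch_norm_sketch() {
  at::Tensor input = at::randn({8, 4, 5, 5});   // (N, C, H, W)
  at::Tensor weight = at::ones({4}), bias = at::zeros({4});
  at::Tensor running_mean = at::zeros({4}), running_var = at::ones({4});
  at::Tensor y = at::batch_norm(input, weight, bias, running_mean, running_var,
                                /*training=*/false, /*momentum=*/0.1,
                                /*eps=*/1e-5, /*cudnn_enabled=*/false);
}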
at::Tensor erfinv(const at::Tensor &self)
Definition: Functions.h:10706
at::Tensor & log2_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:4097
at::Tensor & isnan_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:21517
at::Tensor adaptive_avg_pool2d_symint(const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:13230
void _foreach_abs_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24669
at::Tensor slow_conv3d_forward(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:16761
at::Tensor & selu_(at::Tensor &self)
Definition: Functions.h:6849
at::Tensor & _adaptive_avg_pool3d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25111
at::Tensor & greater_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10163
at::Tensor reflection_pad1d_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13762
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_symint_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps)
Definition: Functions.h:21424
at::Tensor & randperm_out(at::Tensor &out, int64_t n)
Definition: Functions.h:6372
at::Tensor & miopen_convolution_transpose_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21905
at::Tensor & special_erf_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17086
void _cudnn_rnn_backward_symint_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:20017
at::Tensor & special_modified_bessel_i1_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19627
at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor &indices, at::Tensor &grad_input)
Definition: Functions.h:13603
at::Tensor & reflection_pad2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13850
at::Tensor & fft_irfft_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17530
at::Tensor atleast_3d(const at::Tensor &self)
Definition: Functions.h:1035
inline ::std::tuple< at::Tensor &, at::Tensor & > fractional_max_pool2d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &random_samples, at::Tensor &output, at::Tensor &indices)
Definition: Functions.h:13505
void _foreach_maximum_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24475
inline ::std::tuple< at::Tensor, at::Tensor > lstm_cell(const at::Tensor &input, at::TensorList hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const c10::optional< at::Tensor > &b_ih={}, const c10::optional< at::Tensor > &b_hh={})
Definition: Functions.h:9294
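The hx argument above is a TensorList holding {h_0, c_0}, and the call returns (h_1, c_1). A sketch with illustrative sizes; in practice the weights would come from a trained module rather than randn:

#include <ATen/ATen.h>

void lstm_cell_sketch() {
  const int64_t batch = 3, input_size = 10, hidden_size = 20;
  at::Tensor x  = at::randn({batch, input_size});
  at::Tensor h0 = at::zeros({batch, hidden_size});
  at::Tensor c0 = at::zeros({batch, hidden_size});
  at::Tensor w_ih = at::randn({4 * hidden_size, input_size});   // four gates stacked row-wise
  at::Tensor w_hh = at::randn({4 * hidden_size, hidden_size});
  auto [h1, c1] = at::lstm_cell(x, {h0, c0}, w_ih, w_hh);
}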
at::Tensor linalg_cross(const at::Tensor &self, const at::Tensor &other, int64_t dim=-1)
Definition: Functions.h:17809
at::Tensor & roll_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef shifts, at::IntArrayRef dims={})
Definition: Functions.h:22903
at::Tensor & float_power_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &exponent)
Definition: Functions.h:11386
at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15357
at::Tensor _fft_c2c_symint(const at::Tensor &self, c10::SymIntArrayRef dim, int64_t normalization, bool forward)
Definition: Functions.h:3542
void _foreach_exp_(at::TensorList self)
Definition: Functions.h:11828
at::Tensor _nnpack_spatial_convolution_symint(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride=1)
Definition: Functions.h:5164
at::Tensor _embedding_bag_backward_symint(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2532
inline ::std::vector< at::Tensor > gradient(const at::Tensor &self, const c10::optional< at::Scalar > &spacing=c10::nullopt, c10::optional< int64_t > dim=c10::nullopt, int64_t edge_order=1)
Definition: Functions.h:2243
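The entry above differentiates numerically, using central differences in the interior and one-sided differences at the borders, and returns one tensor per differentiated dimension. A 1-D sketch with the default unit spacing (values illustrative):

#include <ATen/ATen.h>
#include <vector>

void gradient_sketch() {
  at::Tensor y = at::tensor({1.0f, 4.0f, 9.0f, 16.0f});   // samples of t^2 at t = 1..4
  std::vector<at::Tensor> g = at::gradient(y);            // defaults: spacing 1, all dims
  // g[0] is {3, 4, 6, 7}: one-sided at the ends, central in the interior.
}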
at::Tensor tril(const at::Tensor &self, int64_t diagonal=0)
Definition: Functions.h:9885
at::Tensor set(const at::Tensor &self, at::Storage source)
Definition: Functions.h:23868
at::Tensor & logical_or_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1248
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_eigh_outf(const at::Tensor &self, c10::string_view UPLO, at::Tensor &eigvals, at::Tensor &eigvecs)
Definition: Functions.h:18099
at::Tensor & abs_(at::Tensor &self)
Definition: Functions.h:358
at::Tensor & ormqr_outf(const at::Tensor &self, const at::Tensor &input2, const at::Tensor &input3, bool left, bool transpose, at::Tensor &out)
Definition: Functions.h:10607
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _fake_quantize_learnable_per_channel_affine_backward(const at::Tensor &grad, const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0)
Definition: Functions.h:9115
at::Tensor & sign_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:10744
at::Tensor & dist_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, const at::Scalar &p=2)
Definition: Functions.h:24286
void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList out, at::TensorList self, at::Tensor &found_inf, const at::Tensor &inv_scale)
Definition: Functions.h:24393
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > unique_dim_consecutive_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &self, int64_t dim, bool return_inverse=false, bool return_counts=false)
Definition: Functions.h:23020
at::Tensor & special_erfinv_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17132
at::Tensor asin(const at::Tensor &self)
Definition: Functions.h:939
at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15126
at::Tensor _test_ambiguous_defaults(const at::Tensor &dummy, int64_t a=1, int64_t b=1)
Definition: Functions.h:18610
bool is_complex(const at::Tensor &tensor)
Definition: Functions.h:26062
at::Tensor & special_hermite_polynomial_h_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19455
at::Tensor quantized_rnn_tanh_cell(const at::Tensor &input, const at::Tensor &hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const at::Tensor &b_ih, const at::Tensor &b_hh, const at::Tensor &packed_ih, const at::Tensor &packed_hh, const at::Tensor &col_offsets_ih, const at::Tensor &col_offsets_hh, const at::Scalar &scale_ih, const at::Scalar &scale_hh, const at::Scalar &zero_point_ih, const at::Scalar &zero_point_hh)
Definition: Functions.h:9329
at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19833
at::Tensor & int_repr_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23705
at::Tensor & upsample_bilinear2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:14895
inline ::std::vector< at::Tensor > vsplit(const at::Tensor &self, int64_t sections)
Definition: Functions.h:7323
at::Tensor & var_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor &out)
Definition: Functions.h:8010
at::Tensor & randn_like_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:22452
at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:6103
at::Tensor & angle_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:391
at::Tensor & slice_scatter_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:22629
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _lu_with_info(const at::Tensor &self, bool pivot=true, bool check_errors=true)
Definition: Functions.h:10617
at::Tensor & isinf_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25298
at::Tensor & normal_symint_out(at::Tensor &out, double mean, double std, c10::SymIntArrayRef size, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:11541
at::Tensor mse_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction)
Definition: Functions.h:12277
at::Tensor & reflection_pad1d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13663
at::Tensor & quantized_batch_norm_outf(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const at::Tensor &mean, const at::Tensor &var, double eps, double output_scale, int64_t output_zero_point, at::Tensor &out)
Definition: Functions.h:20142
at::Tensor & slow_conv3d_forward_out(at::Tensor &output, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:16717
at::Tensor & arccos_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:516
at::Tensor & sspaddmm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:7368
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > linalg_svd(const at::Tensor &A, bool full_matrices=true, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:18300
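The entry above returns (U, S, Vh) with A ≈ U diag(S) Vh. A reduced-SVD sketch; the reconstruction check is illustrative:

#include <ATen/ATen.h>

void linalg_svd_sketch() {
  at::Tensor A = at::randn({5, 3});
  auto [U, S, Vh] = at::linalg_svd(A, /*full_matrices=*/false);
  // Column-scale U by the singular values, then recompose.
  at::Tensor recon = at::matmul(U * S, Vh);
  // at::allclose(A, recon) should hold up to floating-point tolerance.
}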
at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16292
at::Tensor & slow_conv_dilated3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:25250
at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15148
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss_forward_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &output, at::Tensor &total_weight)
Definition: Functions.h:12456
at::Tensor & fix_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:7920
at::Tensor & negative_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6479
at::Tensor max_unpool3d(const at::Tensor &self, const at::Tensor &indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:13636
at::Tensor & _adaptive_avg_pool3d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25133
at::Tensor & special_modified_bessel_k1_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19655
at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13872
at::Tensor & norm_out(at::Tensor &out, const at::Tensor &self, const c10::optional< at::Scalar > &p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype)
Definition: Functions.h:8391
at::Tensor _add_batch_dim(const at::Tensor &self, int64_t batch_dim, int64_t level)
Definition: Functions.h:16959
at::Tensor & hstack_out(at::Tensor &out, at::TensorList tensors)
Definition: Functions.h:7410
at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19417
inline ::std::tuple< at::Tensor &, at::Tensor & > fractional_max_pool3d_out(at::Tensor &output, at::Tensor &indices, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &random_samples)
Definition: Functions.h:13529
const at::Tensor & resize_as_sparse_out(const at::Tensor &out, const at::Tensor &self, const at::Tensor &the_template)
Definition: Functions.h:23330
inline ::std::tuple< at::Tensor &, at::Tensor & > topk_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true)
Definition: Functions.h:11261
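Out-variants such as the one above write into caller-provided tensors, resizing them as needed; the indices output must be int64. A sketch:

#include <ATen/ATen.h>

void topk_out_sketch() {
  at::Tensor x = at::randn({10});
  at::Tensor values  = at::empty({0});
  at::Tensor indices = at::empty({0}, at::kLong);
  at::topk_out(values, indices, x, /*k=*/3);   // largest 3, sorted, along dim -1
}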
at::Tensor & var_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false)
Definition: Functions.h:8006
at::Tensor pixel_shuffle(const at::Tensor &self, int64_t upscale_factor)
Definition: Functions.h:5356
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _thnn_fused_lstm_cell_backward_impl_outf(const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &cx, const at::Tensor &cy, const at::Tensor &workspace, bool has_bias, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:23827
at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar &fill_value, at::TensorOptions options={})
Definition: Functions.h:3211
at::Tensor & mish_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6911
const at::Tensor & resize_as_sparse_(const at::Tensor &self, const at::Tensor &the_template)
Definition: Functions.h:8508
at::Tensor & quantize_per_channel_outf(const at::Tensor &self, const at::Tensor &scales, const at::Tensor &zero_points, int64_t axis, at::ScalarType dtype, at::Tensor &out)
Definition: Functions.h:23660
at::Tensor & hardswish_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25024
at::Tensor & silu_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6874
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > batch_norm_backward_reduce_outf(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &weight, bool input_g, bool weight_g, bool bias_g, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3)
Definition: Functions.h:22074
at::Tensor & sub_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:8518
at::Tensor & fft_irfft2_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17614
at::Tensor & _segment_reduce_backward_outf(const at::Tensor &grad, const at::Tensor &output, const at::Tensor &data, c10::string_view reduce, const c10::optional< at::Tensor > &lengths, const c10::optional< at::Tensor > &offsets, int64_t axis, const c10::optional< at::Scalar > &initial, at::Tensor &out)
Definition: Functions.h:25379
at::Tensor & ormqr_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &input2, const at::Tensor &input3, bool left=true, bool transpose=false)
Definition: Functions.h:10603
inline ::std::tuple< at::Tensor, at::Tensor > _fused_dropout(const at::Tensor &self, double p, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:263
bool __dispatch__is_zerotensor(const at::Tensor &self)
Definition: Functions.h:3747
at::Tensor & take_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &index)
Definition: Functions.h:10243
at::Tensor & diagonal_backward_symint_out(at::Tensor &out, const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2)
Definition: Functions.h:20629
at::Tensor masked_fill(const at::Tensor &self, const at::Tensor &mask, const at::Scalar &value)
Definition: Functions.h:9409
at::Tensor abs(const at::Tensor &self)
Definition: Functions.h:353
at::Tensor & view_as_real_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25415
at::Tensor & repeat_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef repeats)
Definition: Functions.h:22483
at::Tensor & mkldnn_max_pool2d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out)
Definition: Functions.h:21695
at::Tensor special_hermite_polynomial_he(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19482
at::Tensor & special_i0_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17286
at::Tensor & new_zeros_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:21005
at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={})
Definition: Functions.h:3386
at::Tensor & nll_loss2d_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100)
Definition: Functions.h:12599
at::Tensor & _sparse_sparse_matmul_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:22015
at::Tensor & _unsafe_view_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:23049
at::Tensor & nll_loss_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor &total_weight, at::Tensor &grad_input)
Definition: Functions.h:12544
at::Tensor indices_copy(const at::Tensor &self)
Definition: Functions.h:18919
inline ::std::vector< at::Tensor > unflatten_dense_tensors(const at::Tensor &flat, at::TensorList tensors)
Definition: Functions.h:18665
at::Tensor native_dropout_backward(const at::Tensor &grad_output, const at::Tensor &mask, double scale)
Definition: Functions.h:278
at::Tensor & ones_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:5239
inline ::std::tuple< at::Tensor, at::Tensor > multilabel_margin_loss_forward(const at::Tensor &self, const at::Tensor &target, int64_t reduction)
Definition: Functions.h:12338
at::Tensor & dist_outf(const at::Tensor &self, const at::Tensor &other, const at::Scalar &p, at::Tensor &out)
Definition: Functions.h:24290
inline ::std::tuple< at::Tensor, at::Tensor > gru(const at::Tensor &input, const at::Tensor &hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:9264
at::Tensor & _index_put_impl_outf(const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate, bool unsafe, at::Tensor &out)
Definition: Functions.h:21503
at::Tensor & huber_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, double delta)
Definition: Functions.h:12817
at::Tensor linalg_norm(const at::Tensor &self, const c10::optional< at::Scalar > &ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:18216
void _foreach_clamp_min_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24457
at::Tensor & fft_rfft_outf(const at::Tensor &self, c10::optional< int64_t > n, int64_t dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17520
at::Tensor & new_empty_strided_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride)
Definition: Functions.h:20917
at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:15698
at::Tensor & _masked_scale_outf(const at::Tensor &self, const at::Tensor &mask, double scale, at::Tensor &out)
Definition: Functions.h:20061
at::Tensor & frobenius_norm_outf(const at::Tensor &self, at::IntArrayRef dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:8460
at::Tensor & _sparse_softmax_out(at::Tensor &out, const at::Tensor &self, int64_t dim, bool half_to_float)
Definition: Functions.h:23244
at::Tensor _embedding_bag_backward(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2521
at::Tensor & _cholesky_solve_helper_outf(const at::Tensor &self, const at::Tensor &A, bool upper, at::Tensor &out)
Definition: Functions.h:24281
at::Tensor diag_embed(const at::Tensor &self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1)
Definition: Functions.h:2182
inline ::std::tuple< at::Tensor &, at::Tensor & > _unique_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, bool sorted=true, bool return_inverse=false)
Definition: Functions.h:22993
at::Tensor arcsin(const at::Tensor &self)
Definition: Functions.h:958
at::Tensor & absolute_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:377
inline ::std::tuple< at::Tensor &, at::Tensor & > adaptive_max_pool2d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out, at::Tensor &indices)
Definition: Functions.h:13393
at::Tensor _conj_physical(const at::Tensor &self)
Definition: Functions.h:444
at::Tensor & log1p_(at::Tensor &self)
Definition: Functions.h:4073
at::Tensor & ccol_indices_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25811
at::Tensor & neg_(at::Tensor &self)
Definition: Functions.h:6455
at::Tensor & binary_cross_entropy_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:1140
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > unique_dim_consecutive_outf(const at::Tensor &self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:23024
at::Tensor fft_hfft(const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17539
at::Tensor conv1d(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1)
Definition: Functions.h:1817
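A sketch of the convenience convolution above; shapes are illustrative, and the empty optional means no bias:

#include <ATen/ATen.h>

void conv1d_sketch() {
  at::Tensor input  = at::randn({1, 4, 32});   // (N, C_in, L)
  at::Tensor weight = at::randn({8, 4, 3});    // (C_out, C_in, kernel)
  at::Tensor out = at::conv1d(input, weight, /*bias=*/{},
                              /*stride=*/1, /*padding=*/1);
  // out: (1, 8, 32) given stride 1 and padding 1 with a width-3 kernel.
}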
bool equal(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11339
at::Tensor & hypot_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10928
at::Tensor & linalg_matrix_rank_out(at::Tensor &out, const at::Tensor &input, const c10::optional< at::Tensor > &atol={}, const c10::optional< at::Tensor > &rtol={}, bool hermitian=false)
Definition: Functions.h:18515
at::Tensor & pixel_unshuffle_outf(const at::Tensor &self, int64_t downscale_factor, at::Tensor &out)
Definition: Functions.h:22217
at::Tensor & thnn_conv2d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:16525
at::Tensor & concat_outf(at::TensorList tensors, int64_t dim, at::Tensor &out)
Definition: Functions.h:1358
void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale, const c10::optional< at::Tensor > &found_inf, at::TensorList out)
Definition: Functions.h:26011
inline ::std::tuple< at::Tensor &, at::Tensor & > _weight_norm_interface_outf(const at::Tensor &v, const at::Tensor &g, int64_t dim, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23095
inline ::std::tuple< at::Tensor, at::Tensor, int64_t, int64_t, at::Tensor > _flash_attention_forward(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const at::Tensor &cum_seq_q, const at::Tensor &cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask)
Definition: Functions.h:19162
at::Tensor & sparse_mask_outf(const at::Tensor &self, const at::Tensor &mask, at::Tensor &out)
Definition: Functions.h:23488
at::Tensor & rsqrt_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6768
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_out(at::Tensor &out, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor &indices, const at::Tensor &values)
Definition: Functions.h:23434
void _foreach_sinh_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24840
bool is_conj(const at::Tensor &tensor)
Definition: Functions.h:26082
at::Tensor & mish_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6907
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_lu_factor_ex_outf(const at::Tensor &A, bool pivot, bool check_errors, at::Tensor &LU, at::Tensor &pivots, at::Tensor &info)
Definition: Functions.h:17846
at::Tensor & view_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:25829
at::Tensor & cholesky_solve_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &input2, bool upper=false)
Definition: Functions.h:10528
at::Tensor & _adaptive_avg_pool3d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:25122
at::Tensor & gt_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10135
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss2d_forward_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &output, at::Tensor &total_weight)
Definition: Functions.h:12654
at::Tensor & bitwise_or_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:9652
void _foreach_sigmoid_(at::TensorList self)
Definition: Functions.h:12088
at::Tensor & nll_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12357
inline ::std::tuple< at::Tensor &, at::Tensor & > histogram_outf(const at::Tensor &self, const at::Tensor &bins, const c10::optional< at::Tensor > &weight, bool density, at::Tensor &hist, at::Tensor &bin_edges)
Definition: Functions.h:10846
at::Tensor fbgemm_pack_gemm_matrix_fp16(const at::Tensor &input)
Definition: Functions.h:3968
at::Tensor linalg_matrix_power(const at::Tensor &self, int64_t n)
Definition: Functions.h:18496
at::Tensor & _coalesce_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23502
at::Tensor & special_i1e_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17328
at::Tensor & upsample_trilinear3d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15500
at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15302
inline ::std::tuple< at::Tensor, at::Tensor > batch_norm_gather_stats(const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum, double eps, int64_t count)
Definition: Functions.h:5113
at::Tensor & bucketize_outf(const at::Tensor &self, const at::Tensor &boundaries, bool out_int32, bool right, at::Tensor &out)
Definition: Functions.h:12197
void _foreach_frac_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24871
at::Tensor & addmv_outf(const at::Tensor &self, const at::Tensor &mat, const at::Tensor &vec, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:598
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > svd_out(at::Tensor &U, at::Tensor &S, at::Tensor &V, const at::Tensor &self, bool some=true, bool compute_uv=true)
Definition: Functions.h:10490
at::Tensor & eye_outf(int64_t n, at::Tensor &out)
Definition: Functions.h:3059
at::Tensor & searchsorted_outf(const at::Tensor &sorted_sequence, const at::Tensor &self, bool out_int32, bool right, c10::optional< c10::string_view > side, const c10::optional< at::Tensor > &sorter, at::Tensor &out)
Definition: Functions.h:12216
void _foreach_sin_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24835
at::Tensor & hypot_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:10932
at::Tensor & batch_norm_elemt_outf(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const at::Tensor &mean, const at::Tensor &invstd, double eps, at::Tensor &out)
Definition: Functions.h:5108
inline ::std::tuple< at::Tensor &, at::Tensor & > frexp_outf(const at::Tensor &self, at::Tensor &mantissa, at::Tensor &exponent)
Definition: Functions.h:8446
at::Tensor upsample_nearest3d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16314
inline ::std::vector< at::Tensor > _foreach_frac(at::TensorList self)
Definition: Functions.h:12063
at::Tensor & select_copy_symint_outf(const at::Tensor &self, int64_t dim, c10::SymInt index, at::Tensor &out)
Definition: Functions.h:25639
at::Tensor & add_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:541
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_lu_outf(const at::Tensor &A, bool pivot, at::Tensor &P, at::Tensor &L, at::Tensor &U)
Definition: Functions.h:17860
at::Tensor & cholesky_inverse_out(at::Tensor &out, const at::Tensor &self, bool upper=false)
Definition: Functions.h:10552
at::Tensor & empty_symint_out(at::Tensor &out, c10::SymIntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:2853
inline ::std::vector< at::Tensor > _foreach_erfc(at::TensorList self)
Definition: Functions.h:11923
at::Tensor inverse(const at::Tensor &self)
Definition: Functions.h:18160
at::Tensor grid_sampler_3d(const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:3358
at::Tensor & _sparse_broadcast_to_copy_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:25495
at::Tensor & mkldnn_linear_backward_input_out(at::Tensor &out, at::IntArrayRef input_size, const at::Tensor &grad_output, const at::Tensor &weight)
Definition: Functions.h:21628
at::Tensor special_ndtri(const at::Tensor &self)
Definition: Functions.h:16983
at::Tensor linalg_eigvalsh(const at::Tensor &self, c10::string_view UPLO="L")
Definition: Functions.h:18104
at::Tensor row_stack(at::TensorList tensors)
Definition: Functions.h:2492
void _foreach_minimum_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24488
at::Tensor slow_conv3d_forward_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding)
Definition: Functions.h:16772
at::Tensor & zeros_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:8198
at::Tensor & renorm_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &p, int64_t dim, const at::Scalar &maxnorm)
Definition: Functions.h:11303
at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19459
void _amp_foreach_non_finite_check_and_unscale_outf(at::TensorList self, at::Tensor &found_inf, const at::Tensor &inv_scale, at::TensorList out)
Definition: Functions.h:24397
at::Tensor & _test_optional_floatlist_out(at::Tensor &out, const at::Tensor &values, c10::optional< at::ArrayRef< double > > addends)
Definition: Functions.h:25330
void _foreach_lgamma_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24858
at::Tensor narrow_copy_symint(const at::Tensor &self, int64_t dim, c10::SymInt start, c10::SymInt length)
Definition: Functions.h:4953
at::Tensor & std_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor &out)
Definition: Functions.h:7606
inline ::std::tuple< at::Tensor &, at::Tensor & > log_sigmoid_forward_outf(const at::Tensor &self, at::Tensor &output, at::Tensor &buffer)
Definition: Functions.h:13071
at::Tensor & searchsorted_out(at::Tensor &out, const at::Tensor &sorted_sequence, const at::Tensor &self, bool out_int32=false, bool right=false, c10::optional< c10::string_view > side=c10::nullopt, const c10::optional< at::Tensor > &sorter={})
Definition: Functions.h:12212
at::Tensor _cast_Half(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:118
bool __dispatch_is_signed(const at::Tensor &self)
Definition: Functions.h:3772
at::Tensor & special_i0e_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17296
at::Tensor & linalg_det_out(at::Tensor &out, const at::Tensor &A)
Definition: Functions.h:17898
inline ::std::tuple< at::Tensor &, at::Tensor & > fake_quantize_per_tensor_affine_cachemask_outf(const at::Tensor &self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23732
const at::Tensor & fft_hfft2_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional< c10::string_view > norm, const at::Tensor &out)
Definition: Functions.h:17632
at::Tensor copy(const at::Tensor &self, const at::Tensor &src, bool non_blocking=false)
Definition: Functions.h:1872
at::Tensor & _to_dense_outf(const at::Tensor &self, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:23497
at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, const at::Tensor &self)
Definition: Functions.h:23271
at::Tensor & linalg_solve_triangular_outf(const at::Tensor &self, const at::Tensor &B, bool upper, bool left, bool unitriangular, at::Tensor &out)
Definition: Functions.h:10475
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _unique2_outf(const at::Tensor &self, bool sorted, bool return_inverse, bool return_counts, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:23033
inline ::std::tuple< at::Tensor, at::Tensor > grid_sampler_3d_backward(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array< bool, 2 > output_mask)
Definition: Functions.h:3363
at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar &fill_value, at::Tensor &out)
Definition: Functions.h:3244
void _foreach_asin_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24687
int64_t _fused_sdp_choice(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const c10::optional< at::Tensor > &attn_mask={}, double dropout_p=0.0, bool is_causal=false)
Definition: Functions.h:19127
at::Tensor & special_scaled_modified_bessel_k0_out(at::Tensor &out, const at::Tensor &x)
Definition: Functions.h:19669
at::Tensor & new_empty_strided_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor &out)
Definition: Functions.h:20928
at::Tensor linalg_tensorsolve(const at::Tensor &self, const at::Tensor &other, at::OptionalIntArrayRef dims=c10::nullopt)
Definition: Functions.h:18468
inline ::std::vector< at::Tensor > _foreach_sigmoid(at::TensorList self)
Definition: Functions.h:12083
void _foreach_neg_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24804
at::Tensor pad_symint(const at::Tensor &self, c10::SymIntArrayRef pad, c10::string_view mode="constant", c10::optional< double > value=c10::nullopt)
Definition: Functions.h:14488
at::Tensor & fmod_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10904
inline ::std::tuple< at::Tensor &, at::Tensor & > mps_convolution_transpose_backward_outf(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array< bool, 2 > output_mask, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:20530
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _native_decoder_only_multi_head_attention_outf(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask, const c10::optional< at::Tensor > &incr_key, const c10::optional< at::Tensor > &incr_value, bool need_weights, bool average_attn_weights, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3)
Definition: Functions.h:25993
at::Tensor reflection_pad1d_symint(const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13696
const at::Tensor & _conv_depthwise2d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, const at::Tensor &out)
Definition: Functions.h:16574
at::Tensor _convolution_mode(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:1790
void _foreach_erfc_(at::TensorList self)
Definition: Functions.h:11928
inline ::std::tuple< at::Tensor &, at::Tensor & > multilabel_margin_loss_forward_out(at::Tensor &output, at::Tensor &is_target, const at::Tensor &self, const at::Tensor &target, int64_t reduction)
Definition: Functions.h:12329
void _validate_sparse_csr_tensor_args(const at::Tensor &crow_indices, const at::Tensor &col_indices, const at::Tensor &values, at::IntArrayRef size)
Definition: Functions.h:8853
at::Tensor & signbit_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:10758
at::Tensor fft_fftn(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17651
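With both s and dim left as c10::nullopt, the entry above transforms every dimension at its full size. A sketch using orthonormal scaling:

#include <ATen/ATen.h>

void fft_fftn_sketch() {
  at::Tensor x = at::randn({4, 4});
  at::Tensor X = at::fft_fftn(x, /*s=*/c10::nullopt, /*dim=*/c10::nullopt,
                              /*norm=*/"ortho");   // complex-valued result
}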
inline ::std::tuple< at::Tensor, at::Tensor > _thnn_fused_gru_cell(const at::Tensor &input_gates, const at::Tensor &hidden_gates, const at::Tensor &hx, const c10::optional< at::Tensor > &input_bias={}, const c10::optional< at::Tensor > &hidden_bias={})
Definition: Functions.h:9239
at::Tensor pixel_unshuffle(const at::Tensor &self, int64_t downscale_factor)
Definition: Functions.h:5361
at::Tensor huber_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, double delta)
Definition: Functions.h:12826
at::Tensor & _mps_convolution_transpose_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor &out)
Definition: Functions.h:20521
at::Tensor sub(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:8527
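The alpha scalar above scales other before subtraction, i.e. self - alpha * other, a convention shared with add. A sketch:

#include <ATen/ATen.h>

void sub_sketch() {
  at::Tensor a = at::ones({3});
  at::Tensor b = at::ones({3});
  at::Tensor r = at::sub(a, b, /*alpha=*/2);   // 1 - 2*1 == -1 elementwise
}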
at::Tensor & _upsample_nearest_exact2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15885
at::Tensor & rand_like_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:22337
at::Tensor & gcd_(at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3309
at::Tensor & arctan_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:1006
at::Tensor & hardshrink_backward_out(at::Tensor &grad_input, const at::Tensor &grad_out, const at::Tensor &self, const at::Scalar &lambd)
Definition: Functions.h:6740
inline ::std::tuple< at::Tensor &, at::Tensor & > sort_outf(const at::Tensor &self, int64_t dim, bool descending, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:11180
at::Tensor nanmedian(const at::Tensor &self)
Definition: Functions.h:4606
at::Tensor & linalg_cross_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, int64_t dim=-1)
Definition: Functions.h:17814
at::Tensor & logit_outf(const at::Tensor &self, c10::optional< double > eps, at::Tensor &out)
Definition: Functions.h:6954
at::Tensor & huber_loss_outf(const at::Tensor &self, const at::Tensor &target, int64_t reduction, double delta, at::Tensor &out)
Definition: Functions.h:12807
at::Tensor _reshape_alias(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:6578
at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15027
at::Tensor & _fft_c2c_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef dim, int64_t normalization, bool forward)
Definition: Functions.h:3575
at::Tensor clone(const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:8493
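The optional memory_format above lets the copy adopt a different physical layout while preserving the logical shape. A sketch converting a 4-D tensor to channels-last:

#include <ATen/ATen.h>

void clone_sketch() {
  at::Tensor src  = at::randn({2, 3, 4, 4});                        // NCHW strides
  at::Tensor nhwc = at::clone(src, at::MemoryFormat::ChannelsLast); // same values, NHWC strides
}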
at::Tensor & embedding_symint_out(at::Tensor &out, const at::Tensor &weight, const at::Tensor &indices, c10::SymInt padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false)
Definition: Functions.h:20691
at::Tensor & erfc_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:2961
at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15412
at::Tensor & upsample_linear1d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:14796
at::Tensor _coalesce(const at::Tensor &self)
Definition: Functions.h:8936
at::Tensor put(const at::Tensor &self, const at::Tensor &index, const at::Tensor &source, bool accumulate=false)
Definition: Functions.h:9448
at::Tensor & tan_(at::Tensor &self)
Definition: Functions.h:7691
at::Tensor & im2col_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16907
at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options)
Definition: Functions.h:8772
at::Tensor & special_digamma_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17062
at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor &crow_indices, const at::Tensor &col_indices, bool out_int32=false, bool transpose=false)
Definition: Functions.h:12240
at::Tensor & upsample_bilinear2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:14917
at::Tensor binomial(const at::Tensor &count, const at::Tensor &prob, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:8276
at::Tensor nll_loss_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12566
at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19585
at::Tensor reflection_pad1d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13685
at::Tensor & new_zeros_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:20983
at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15874
at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor &out)
Definition: Functions.h:21343
at::Tensor rand_like(const at::Tensor &self, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:5702
void unsafe_split_with_sizes_outf(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out)
Definition: Functions.h:22825
at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:15610
at::Tensor col_indices_copy(const at::Tensor &self)
Definition: Functions.h:18934
at::Tensor & kron_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:3796
at::Tensor & slow_conv_transpose3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:16466
at::Tensor & where_outf(const at::Tensor &condition, const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:8086
void split_copy_symint_outf(const at::Tensor &self, c10::SymInt split_size, int64_t dim, at::TensorList out)
Definition: Functions.h:18996
at::Tensor & leaky_relu_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &negative_slope=0.01)
Definition: Functions.h:13020
at::Tensor & inner_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:18183
at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15379
at::Tensor & values_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25784
at::Tensor _nested_sum_backward(const at::Tensor &grad, const at::Tensor &self, at::OptionalIntArrayRef dim, bool keepdim=false)
Definition: Functions.h:7505
at::Tensor & clamp_out(at::Tensor &out, const at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max=c10::nullopt)
Definition: Functions.h:1522
at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor &out, at::IntArrayRef size, const at::Tensor &scales, const at::Tensor &zero_points, int64_t axis, c10::optional< at::MemoryFormat > memory_format=MemoryFormat::Contiguous)
Definition: Functions.h:21080
at::Tensor & tril_indices_out(at::Tensor &out, int64_t row, int64_t col, int64_t offset=0)
Definition: Functions.h:24250
at::Tensor & erf_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:2942
at::Tensor & xlogy_(at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4149
const at::Tensor & _conv_depthwise2d_symint_out(const at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16585
at::Tensor values_copy(const at::Tensor &self)
Definition: Functions.h:18924
at::Tensor & ldexp_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4003
at::Tensor fractional_max_pool2d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &indices)
Definition: Functions.h:13524
at::Tensor & constant_pad_nd_symint_outf(const at::Tensor &self, c10::SymIntArrayRef pad, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:20248
at::Tensor gt(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10140
at::Tensor & _empty_affine_quantized_out(at::Tensor &out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional< at::MemoryFormat > memory_format=MemoryFormat::Contiguous)
Definition: Functions.h:21071
at::Tensor & ne_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:9939
at::Tensor & copy_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, bool non_blocking=false)
Definition: Functions.h:20418
at::Tensor upsample_bilinear2d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14532
at::Tensor & multiply_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4900
at::Tensor quantize_per_tensor(const at::Tensor &self, double scale, int64_t zero_point, at::ScalarType dtype)
Definition: Functions.h:8990
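A minimal usage sketch for the at::quantize_per_tensor entry above, assuming the umbrella header <ATen/ATen.h> and a linked ATen build; the demo function name and all values are illustrative:
  #include <ATen/ATen.h>
  void quantize_per_tensor_demo() {
    at::Tensor x = at::randn({4});  // float input
    // Affine-quantize to unsigned 8-bit with scale 0.1 and zero point 10.
    at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/10, at::kQUInt8);
    at::Tensor back = q.dequantize();  // approximate round trip back to float
  }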
at::Tensor & diagonal_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor &out)
Definition: Functions.h:20640
inline ::std::tuple< at::Tensor &, at::Tensor & > _linalg_eigh_out(at::Tensor &eigenvalues, at::Tensor &eigenvectors, const at::Tensor &A, c10::string_view UPLO="L", bool compute_v=true)
Definition: Functions.h:18081
at::Tensor multinomial(const at::Tensor &self, int64_t num_samples, bool replacement=false, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:10659
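A sketch of at::multinomial from the entry above, under the same assumptions (umbrella header, illustrative names and values):
  #include <ATen/ATen.h>
  void multinomial_demo() {
    // Non-negative weights; they do not need to sum to 1.
    at::Tensor weights = at::tensor({0.1, 0.3, 0.6});
    // Draw two distinct indices (replacement defaults to false).
    at::Tensor idx = at::multinomial(weights, /*num_samples=*/2);
    // idx is a Long tensor of indices into `weights`.
  }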
at::Tensor linalg_householder_product(const at::Tensor &input, const at::Tensor &tau)
Definition: Functions.h:18118
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_lu_factor_outf(const at::Tensor &A, bool pivot, at::Tensor &LU, at::Tensor &pivots)
Definition: Functions.h:17832
at::Tensor & celu_outf(const at::Tensor &self, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:22580
at::Tensor & select_copy_outf(const at::Tensor &self, int64_t dim, int64_t index, at::Tensor &out)
Definition: Functions.h:25617
inline ::std::vector< at::Tensor > _foreach_erf(at::TensorList self)
Definition: Functions.h:11913
at::Tensor & gcd_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3295
inline ::std::tuple< at::Tensor, at::Tensor > _weight_norm_differentiable_backward(const at::Tensor &grad_w, const at::Tensor &saved_v, const at::Tensor &saved_g, const at::Tensor &saved_norms, int64_t dim)
Definition: Functions.h:8131
at::Tensor & _fw_primal_copy_out(at::Tensor &out, const at::Tensor &self, int64_t level)
Definition: Functions.h:25393
at::Tensor & exponential_out(at::Tensor &out, const at::Tensor &self, double lambd=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24222
at::Tensor resolve_neg(const at::Tensor &self)
Definition: Functions.h:473
void _fused_adamw_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional< at::Tensor > &grad_scale={}, const c10::optional< at::Tensor > &found_inf={})
Definition: Functions.h:26021
at::Tensor & special_legendre_polynomial_p_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19581
at::Tensor grid_sampler(const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:3333
at::Tensor & special_chebyshev_polynomial_u_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19333
at::Tensor vdot(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2387
bool allclose(const at::Tensor &self, const at::Tensor &other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false)
Definition: Functions.h:670
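The at::allclose entry above compares two tensors under relative and absolute tolerances; a minimal sketch (hypothetical demo name, illustrative values):
  #include <ATen/ATen.h>
  void allclose_demo() {
    at::Tensor a = at::ones({2, 2});
    at::Tensor b = a + 1e-7;
    // true: the difference is within the default rtol=1e-05, atol=1e-08.
    bool same = at::allclose(a, b);
  }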
at::Tensor ones(at::IntArrayRef size, c10::optional< at::DimnameList > names, at::TensorOptions options={})
Definition: Functions.h:5175
const at::Tensor & resize_symint_out(const at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21111
at::Tensor & fractional_max_pool3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &indices, at::Tensor &grad_input)
Definition: Functions.h:13547
at::Tensor & rand_symint_out(at::Tensor &out, c10::SymIntArrayRef size)
Definition: Functions.h:5636
at::Tensor & rand_like_outf(const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:22341
at::Tensor msort(const at::Tensor &self)
Definition: Functions.h:11241
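at::msort (entry above) sorts along the first dimension; a small sketch under the same assumptions:
  #include <ATen/ATen.h>
  void msort_demo() {
    at::Tensor t = at::randn({4, 2});
    at::Tensor s = at::msort(t);  // each column sorted ascending, i.e. sorted along dim 0
  }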
at::Tensor angle(const at::Tensor &self)
Definition: Functions.h:386
at::Tensor addmv(const at::Tensor &self, const at::Tensor &mat, const at::Tensor &vec, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:584
at::Tensor & replication_pad2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14202
at::Tensor & upsample_nearest3d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:16116
inline ::std::tuple< at::Tensor, at::Tensor > _weight_norm_interface(const at::Tensor &v, const at::Tensor &g, int64_t dim=0)
Definition: Functions.h:8121
at::Tensor & native_norm_outf(const at::Tensor &self, const at::Scalar &p, at::Tensor &out)
Definition: Functions.h:23194
at::Tensor & logit_(at::Tensor &self, c10::optional< double > eps=c10::nullopt)
Definition: Functions.h:6945
at::Tensor zero(const at::Tensor &self)
Definition: Functions.h:23353
at::Tensor & randn_symint_out(at::Tensor &out, c10::SymIntArrayRef size)
Definition: Functions.h:6279
at::Tensor & _adaptive_avg_pool2d_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:25091
at::Tensor linspace(const at::Scalar &start, const at::Scalar &end, int64_t steps, at::TensorOptions options={})
Definition: Functions.h:4012
at::Tensor orgqr(const at::Tensor &self, const at::Tensor &input2)
Definition: Functions.h:10589
at::Tensor & rot90_out(at::Tensor &out, const at::Tensor &self, int64_t k=1, at::IntArrayRef dims={0, 1})
Definition: Functions.h:22912
at::Tensor quantize_per_channel(const at::Tensor &self, const at::Tensor &scales, const at::Tensor &zero_points, int64_t axis, at::ScalarType dtype)
Definition: Functions.h:9005
at::Tensor & ccol_indices_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25815
at::Tensor _reshape_copy(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:6556
at::Tensor cummaxmin_backward(const at::Tensor &grad, const at::Tensor &input, const at::Tensor &indices, int64_t dim)
Definition: Functions.h:2076
at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13993
at::Tensor & sigmoid_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6935
at::Tensor & silu_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:6883
inline ::std::tuple< at::Tensor &, at::Tensor & > std_mean_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< int64_t > correction=c10::nullopt, bool keepdim=false)
Definition: Functions.h:22867
at::Tensor permute_copy(const at::Tensor &self, at::IntArrayRef dims)
Definition: Functions.h:18759
inline ::std::tuple< at::Tensor, at::Tensor > mkldnn_linear_backward_weights(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, bool bias_defined)
Definition: Functions.h:3943
at::Tensor & _copy_from_outf(const at::Tensor &self, const at::Tensor &dst, bool non_blocking, at::Tensor &out)
Definition: Functions.h:20431
at::Tensor & glu_out(at::Tensor &out, const at::Tensor &self, int64_t dim=-1)
Definition: Functions.h:12892
at::Tensor _embedding_bag_dense_backward_symint(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2576
at::Tensor & randint_outf(int64_t high, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:5898
at::Tensor & slow_conv3d_forward_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &output)
Definition: Functions.h:16728
at::Tensor & mkldnn_adaptive_avg_pool2d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:13250
at::Tensor & slow_conv_transpose2d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16389
inline ::std::tuple< at::Tensor, at::Tensor > matmul_backward(const at::Tensor &grad, const at::Tensor &self, const at::Tensor &other, ::std::array< bool, 2 > mask)
Definition: Functions.h:4331
at::Tensor & _sobol_engine_scramble_(at::Tensor &self, const at::Tensor &ltm, int64_t dimension)
Definition: Functions.h:293
at::Tensor & miopen_convolution_transpose_symint_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21938
at::Tensor & _sparse_softmax_backward_data_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, const at::Tensor &self)
Definition: Functions.h:23253
at::Tensor exp(const at::Tensor &self)
Definition: Functions.h:2966
at::Tensor & conv_depthwise3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25173
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_solve_ex_out(at::Tensor &result, at::Tensor &info, const at::Tensor &A, const at::Tensor &B, bool left=true, bool check_errors=false)
Definition: Functions.h:18431
at::Tensor & logical_xor_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:1224
at::Tensor & sqrt_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:7538
at::Tensor & fft_fftfreq_out(at::Tensor &out, int64_t n, double d=1.0)
Definition: Functions.h:17744
at::Tensor any(const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:675
at::Tensor & native_dropout_backward_outf(const at::Tensor &grad_output, const at::Tensor &mask, double scale, at::Tensor &out)
Definition: Functions.h:20079
at::Tensor & avg_pool2d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override, at::Tensor &out)
Definition: Functions.h:13449
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor &input, const at::Tensor &grad_output, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, const c10::optional< at::Tensor > &save_mean, const c10::optional< at::Tensor > &save_var_transform, bool train, double eps, ::std::array< bool, 3 > output_mask, const at::Tensor &reservedSpace)
Definition: Functions.h:1092
const at::Tensor & sparse_resize_outf(const at::Tensor &self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor &out)
Definition: Functions.h:23460
at::Tensor & q_per_channel_scales_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23683
at::Tensor & less_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10219
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_cholesky_ex_out(at::Tensor &L, at::Tensor &info, const at::Tensor &self, bool upper=false, bool check_errors=false)
Definition: Functions.h:17786
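A sketch of the functional counterpart of the linalg_cholesky_ex_out entry above; the structured binding assumes C++17, and the matrix is made positive-definite by construction:
  #include <ATen/ATen.h>
  void cholesky_ex_demo() {
    at::Tensor A = 2.0 * at::eye(3);  // symmetric positive-definite
    auto [L, info] = at::linalg_cholesky_ex(A);
    // info == 0 signals success; with check_errors=false (the default),
    // inspecting `info` is left to the caller.
  }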
inline ::std::tuple< at::Tensor &, at::Tensor & > _unique_outf(const at::Tensor &self, bool sorted, bool return_inverse, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:22997
at::Tensor & feature_alpha_dropout_(at::Tensor &self, double p, bool train)
Definition: Functions.h:348
at::Tensor & linalg_cross_outf(const at::Tensor &self, const at::Tensor &other, int64_t dim, at::Tensor &out)
Definition: Functions.h:17818
at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15137
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > mps_convolution_backward_outf(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21776
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > miopen_batch_norm_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21847
at::Tensor & bmm_outf(const at::Tensor &self, const at::Tensor &mat2, at::Tensor &out)
Definition: Functions.h:1284
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _native_batch_norm_legit(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, at::Tensor &running_mean, at::Tensor &running_var, bool training, double momentum, double eps)
Definition: Functions.h:5066
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _thnn_fused_lstm_cell_backward_impl(const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &cx, const at::Tensor &cy, const at::Tensor &workspace, bool has_bias)
Definition: Functions.h:9224
at::Tensor & dstack_outf(at::TensorList tensors, at::Tensor &out)
Definition: Functions.h:7442
at::Tensor & special_logsumexp_outf(const at::Tensor &self, at::IntArrayRef dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:17370
at::Tensor clip(const at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max=c10::nullopt)
Definition: Functions.h:1616
inline ::std::vector< at::Tensor > _foreach_floor(at::TensorList self)
Definition: Functions.h:11943
at::Tensor & nonzero_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10344
at::Tensor & linalg_ldl_solve_out(at::Tensor &out, const at::Tensor &LD, const at::Tensor &pivots, const at::Tensor &B, bool hermitian=false)
Definition: Functions.h:17945
at::Tensor & index_out(at::Tensor &out, const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices)
Definition: Functions.h:3627
at::Tensor & eye_out(at::Tensor &out, int64_t n)
Definition: Functions.h:3055
at::Tensor feature_dropout(const at::Tensor &input, double p, bool train)
Definition: Functions.h:323
at::Tensor normal(const at::Tensor &mean, double std=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:11442
at::Tensor & rand_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:5614
at::Tensor fft_fftshift(const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt)
Definition: Functions.h:17771
at::Tensor & mkldnn_max_pool2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out)
Definition: Functions.h:21704
at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong)
Definition: Functions.h:5711
void _foreach_ceil_(at::TensorList self)
Definition: Functions.h:11888
at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15291
at::Tensor & expm1_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:3018
at::Tensor asinh(const at::Tensor &self)
Definition: Functions.h:819
at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14708
at::Tensor & replication_pad2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14191
at::Tensor & upsample_nearest1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15555
at::Tensor & column_stack_out(at::Tensor &out, at::TensorList tensors)
Definition: Functions.h:16898
bool _nested_tensor_from_mask_left_aligned(const at::Tensor &t, const at::Tensor &mask)
Definition: Functions.h:7856
at::Tensor round(const at::Tensor &self)
Definition: Functions.h:6605
at::Tensor & matrix_power_outf(const at::Tensor &self, int64_t n, at::Tensor &out)
Definition: Functions.h:4354
at::Tensor & asin_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:953
at::Tensor & fft_rfftn_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17688
at::Tensor frobenius_norm(const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false)
Definition: Functions.h:8451
at::Tensor select_backward(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index)
Definition: Functions.h:6800
at::Tensor _sparse_csr_prod(const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:8321
at::Tensor & smooth_l1_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, double beta)
Definition: Functions.h:12789
at::Tensor & hardswish_(at::Tensor &self)
Definition: Functions.h:13010
at::Tensor broadcast_to(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:1294
at::Tensor upsample_linear1d(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14499
at::Tensor log_sigmoid(const at::Tensor &self)
Definition: Functions.h:13062
inline ::std::vector< at::Tensor > dsplit(const at::Tensor &self, int64_t sections)
Definition: Functions.h:7333
at::Tensor & vstack_outf(at::TensorList tensors, at::Tensor &out)
Definition: Functions.h:7428
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_eig_out(at::Tensor &eigenvalues, at::Tensor &eigenvectors, const at::Tensor &self)
Definition: Functions.h:18053
at::Tensor where(const at::Tensor &condition, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:8077
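A sketch of the three-argument at::where above, which selects elementwise between two tensors (illustrative values):
  #include <ATen/ATen.h>
  void where_demo() {
    at::Tensor x = at::randn({4});
    // Keep positive entries, zero out the rest (ReLU expressed via where).
    at::Tensor y = at::where(x > 0, x, at::zeros_like(x));
  }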
at::Tensor & asinh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:833
at::Tensor clamp(const at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max=c10::nullopt)
Definition: Functions.h:1502
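Both bounds of at::clamp above are c10::optional, so either side may be omitted; a sketch (the one-sided call is shown as commonly used, an assumption about overload resolution):
  #include <ATen/ATen.h>
  void clamp_demo() {
    at::Tensor x = at::randn({4});
    at::Tensor y = at::clamp(x, 0.0, 1.0);           // clamp into [0, 1]
    at::Tensor z = at::clamp(x, c10::nullopt, 1.0);  // upper bound only
  }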
at::Tensor select_copy(const at::Tensor &self, int64_t dim, int64_t index)
Definition: Functions.h:18786
at::Tensor slow_conv3d_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0))
Definition: Functions.h:16706
at::Tensor linalg_solve(const at::Tensor &A, const at::Tensor &B, bool left=true)
Definition: Functions.h:18440
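A sketch of at::linalg_solve above, solving A x = b and checking the residual (values illustrative; the diagonal shift just makes A almost surely invertible):
  #include <ATen/ATen.h>
  void linalg_solve_demo() {
    at::Tensor A = at::randn({3, 3}) + 3.0 * at::eye(3);
    at::Tensor b = at::randn({3});
    at::Tensor x = at::linalg_solve(A, b);  // left=true by default: solves A x = b
    bool ok = at::allclose(at::matmul(A, x), b, /*rtol=*/1e-4, /*atol=*/1e-6);
  }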
at::Tensor fft_fft2(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17567
at::Tensor & celu_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &alpha=1.0)
Definition: Functions.h:22576
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _slow_conv2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &grad_input, at::Tensor &grad_weight, at::Tensor &grad_bias)
Definition: Functions.h:16553
at::Tensor upsample_nearest2d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14675
at::Tensor & upsample_bilinear2d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:14994
at::Tensor & replication_pad3d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14323
inline ::std::tuple< at::Tensor, at::Tensor > grid_sampler_2d_backward(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array< bool, 2 > output_mask)
Definition: Functions.h:3343
at::Tensor & normal_outf(const at::Tensor &mean, double std, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:11437
at::Tensor & _nested_from_padded_outf(const at::Tensor &padded, const at::Tensor &cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor &out)
Definition: Functions.h:22943
at::Tensor & col_indices_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25802
at::Tensor exponential(const at::Tensor &self, double lambd=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24231
inline ::std::vector< at::Tensor > unsafe_split(const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:7203
at::Tensor slice_copy(const at::Tensor &self, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:18813
inline ::std::tuple< at::Tensor &, at::Tensor & > _thnn_fused_gru_cell_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &input_gates, const at::Tensor &hidden_gates, const at::Tensor &hx, const c10::optional< at::Tensor > &input_bias={}, const c10::optional< at::Tensor > &hidden_bias={})
Definition: Functions.h:23832
at::Tensor _new_zeros_with_same_feature_meta(const at::Tensor &self, const at::Tensor &other, int64_t self_num_batch_dims=0)
Definition: Functions.h:133
at::Tensor range(const at::Scalar &start, const at::Scalar &end, const at::Scalar &step=1, at::TensorOptions options={})
Definition: Functions.h:6390
inline ::std::tuple< at::Tensor &, at::Tensor & > slogdet_outf(const at::Tensor &self, at::Tensor &sign, at::Tensor &logabsdet)
Definition: Functions.h:18038
at::Tensor & linalg_ldl_solve_outf(const at::Tensor &LD, const at::Tensor &pivots, const at::Tensor &B, bool hermitian, at::Tensor &out)
Definition: Functions.h:17949
at::Tensor matrix_power(const at::Tensor &self, int64_t n)
Definition: Functions.h:4345
at::Tensor & miopen_convolution_transpose_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21927
bool __dispatch_is_floating_point(const at::Tensor &self)
Definition: Functions.h:3732
at::Tensor & replication_pad2d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14257
at::Tensor stack(at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:7377
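at::stack above concatenates same-shaped tensors along a new dimension, the leading one by default; a minimal sketch:
  #include <ATen/ATen.h>
  void stack_demo() {
    at::Tensor a = at::randn({2, 3});
    at::Tensor b = at::randn({2, 3});
    at::Tensor s = at::stack({a, b});  // shape {2, 2, 3}; dim=0 inserts the new axis in front
  }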
at::Tensor & bucketize_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &boundaries, bool out_int32=false, bool right=false)
Definition: Functions.h:12193
at::Tensor & special_modified_bessel_i0_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19613
at::Tensor embedding(const at::Tensor &weight, const at::Tensor &indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false)
Definition: Functions.h:2406
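A sketch of at::embedding above: row lookup in a weight matrix by integral indices (names and shapes illustrative):
  #include <ATen/ATen.h>
  void embedding_demo() {
    at::Tensor weight = at::randn({10, 4});          // 10 embeddings of width 4
    at::Tensor idx = at::tensor({2, 5}, at::kLong);  // indices must be an integral tensor
    at::Tensor rows = at::embedding(weight, idx);    // shape {2, 4}
  }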
at::Tensor & deg2rad_(at::Tensor &self)
Definition: Functions.h:5415
at::Tensor & values_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25788
at::Tensor & full_like_outf(const at::Tensor &self, const at::Scalar &fill_value, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:21262
at::Tensor & special_ndtri_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:16992
at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:16422
inline ::std::tuple< at::Tensor, at::Tensor > max(const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:4407
at::Tensor & tanh_backward_outf(const at::Tensor &grad_output, const at::Tensor &output, at::Tensor &grad_input)
Definition: Functions.h:16379
inline ::std::tuple< at::Tensor, at::Tensor > _scaled_dot_product_attention_math(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const c10::optional< at::Tensor > &attn_mask={}, double dropout_p=0.0, bool is_causal=false, const c10::optional< at::Tensor > &dropout_mask={})
Definition: Functions.h:19132
at::Tensor _euclidean_dist(const at::Tensor &x1, const at::Tensor &x2)
Definition: Functions.h:5291
at::Tensor atan2(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10781
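at::atan2 above is the elementwise, quadrant-aware arctangent of y/x; a small sketch:
  #include <ATen/ATen.h>
  void atan2_demo() {
    at::Tensor y = at::tensor({1.0, -1.0});
    at::Tensor x = at::tensor({1.0, 1.0});
    at::Tensor angles = at::atan2(y, x);  // approximately {pi/4, -pi/4}
  }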
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_svd_outf(const at::Tensor &A, bool full_matrices, c10::optional< c10::string_view > driver, at::Tensor &U, at::Tensor &S, at::Tensor &Vh)
Definition: Functions.h:18309
void _foreach_floor_(at::TensorList self)
Definition: Functions.h:11948
at::Tensor & acos_(at::Tensor &self)
Definition: Functions.h:488
at::Tensor & max_pool2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out)
Definition: Functions.h:21686
at::Tensor & slow_conv_dilated2d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:25206
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _native_batch_norm_legit_outf(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, at::Tensor &running_mean, at::Tensor &running_var, bool training, double momentum, double eps, at::Tensor &out, at::Tensor &save_mean, at::Tensor &save_invstd)
Definition: Functions.h:5075
at::Tensor & grid_sampler_3d_outf(const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor &out)
Definition: Functions.h:21307
at::Tensor miopen_convolution_relu(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:4799
at::Tensor & tan_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:7700
at::Tensor & select_scatter_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, int64_t dim, int64_t index)
Definition: Functions.h:22673
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_lu_factor_ex_out(at::Tensor &LU, at::Tensor &pivots, at::Tensor &info, const at::Tensor &A, bool pivot=true, bool check_errors=false)
Definition: Functions.h:17842
at::Tensor & linalg_inv_out(at::Tensor &out, const at::Tensor &A)
Definition: Functions.h:18151
at::Tensor sparse_bsc_tensor(const at::Tensor &ccol_indices, const at::Tensor &row_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options)
Definition: Functions.h:8673
at::Tensor special_exp2(const at::Tensor &self)
Definition: Functions.h:17025
at::Tensor & slice_scatter_outf(const at::Tensor &self, const at::Tensor &src, int64_t dim, c10::optional< int64_t > start, c10::optional< int64_t > end, int64_t step, at::Tensor &out)
Definition: Functions.h:22640
inline ::std::tuple< at::Tensor &, at::Tensor & > slogdet_out(at::Tensor &sign, at::Tensor &logabsdet, const at::Tensor &self)
Definition: Functions.h:18034
at::Tensor & upsample_trilinear3d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15522
at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8176
inline ::std::vector< at::Tensor > _foreach_asin(at::TensorList self)
Definition: Functions.h:11863
at::Tensor bitwise_xor(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9709
at::Tensor tile(const at::Tensor &self, at::IntArrayRef dims)
Definition: Functions.h:7771
at::Tensor embedding_sparse_backward(const at::Tensor &grad, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq)
Definition: Functions.h:2477
inline ::std::tuple< at::Tensor &, at::Tensor & > aminmax_out(at::Tensor &min, at::Tensor &max, const at::Tensor &self, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false)
Definition: Functions.h:4384
at::Tensor einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path=c10::nullopt)
Definition: Functions.h:2401
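A sketch of at::einsum above; the equation string follows the usual Einstein-summation notation, and the optional `path` argument is left at its default:
  #include <ATen/ATen.h>
  void einsum_demo() {
    at::Tensor a = at::randn({2, 3});
    at::Tensor b = at::randn({3, 4});
    at::Tensor c = at::einsum("ij,jk->ik", {a, b});  // same result as at::matmul(a, b)
  }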
at::Tensor diagflat(const at::Tensor &self, int64_t offset=0)
Definition: Functions.h:2187
inline ::std::tuple< at::Tensor, at::Tensor > _cudnn_ctc_loss(const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity)
Definition: Functions.h:168
at::Tensor alias_copy(const at::Tensor &self)
Definition: Functions.h:19083
at::Tensor & mkldnn_max_pool3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out)
Definition: Functions.h:21722
at::Tensor & _nnpack_spatial_convolution_symint_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride=1)
Definition: Functions.h:22119
at::Tensor & special_bessel_y1_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19267
at::Tensor & trunc_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:7901
at::Tensor & scatter_add_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src)
Definition: Functions.h:9577
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _thnn_fused_lstm_cell(const at::Tensor &input_gates, const at::Tensor &hidden_gates, const at::Tensor &cx, const c10::optional< at::Tensor > &input_bias={}, const c10::optional< at::Tensor > &hidden_bias={})
Definition: Functions.h:9219
void _assert_tensor_metadata(const at::Tensor &a, at::OptionalIntArrayRef size=c10::nullopt, at::OptionalIntArrayRef stride=c10::nullopt, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:153
at::Tensor baddbmm(const at::Tensor &self, const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:1045
at::Tensor rsub(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:8556
at::Tensor & igamma_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:10946
at::Tensor & gelu_(at::Tensor &self, c10::string_view approximate="none")
Definition: Functions.h:6697
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > lu_unpack_outf(const at::Tensor &LU_data, const at::Tensor &LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor &P, at::Tensor &L, at::Tensor &U)
Definition: Functions.h:10645
at::Tensor flatten_dense_tensors(at::TensorList tensors)
Definition: Functions.h:18660
at::Tensor relu(const at::Tensor &self)
Definition: Functions.h:6653
at::Tensor special_erf(const at::Tensor &self)
Definition: Functions.h:17081
at::Tensor bitwise_or(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9666
at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor &ccol_indices, const at::Tensor &row_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8745
at::Tensor nextafter(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10979
at::Tensor & multinomial_out(at::Tensor &out, const at::Tensor &self, int64_t num_samples, bool replacement=false, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:10650
at::Tensor & _dirichlet_grad_outf(const at::Tensor &x, const at::Tensor &alpha, const at::Tensor &total, at::Tensor &out)
Definition: Functions.h:23158
at::Tensor upsample_nearest3d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14719
at::Tensor special_erfinv(const at::Tensor &self)
Definition: Functions.h:17123
at::Tensor & linalg_vecdot_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &y, int64_t dim=-1)
Definition: Functions.h:17987
at::Tensor thnn_conv2d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0)
Definition: Functions.h:16530
at::Tensor slice_backward(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step)
Definition: Functions.h:7058
at::Tensor & abs_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:367
at::Tensor _prelu_kernel(const at::Tensor &self, const at::Tensor &weight)
Definition: Functions.h:6678
at::Tensor & logit_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::optional< double > eps=c10::nullopt)
Definition: Functions.h:16361
at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19753
at::Tensor randint_symint(int64_t high, c10::SymIntArrayRef size, at::TensorOptions options=at::kLong)
Definition: Functions.h:5733
at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:16138
at::Tensor mkldnn_max_pool3d_backward(const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4506
inline ::std::tuple< at::Tensor, at::Tensor > _fused_moving_avg_obs_fq_helper(const at::Tensor &self, const at::Tensor &observer_on, const at::Tensor &fake_quant_on, at::Tensor &running_min, at::Tensor &running_max, at::Tensor &scale, at::Tensor &zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false)
Definition: Functions.h:9125
at::Tensor & _embedding_bag_per_sample_weights_backward_outf(const at::Tensor &grad, const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, int64_t mode, int64_t padding_idx, at::Tensor &out)
Definition: Functions.h:20837
at::Tensor embedding_backward_symint(const at::Tensor &grad, const at::Tensor &indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse)
Definition: Functions.h:2439
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _cudnn_rnn_symint_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:19973
at::Tensor & slow_conv3d_forward_symint_out(at::Tensor &output, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding)
Definition: Functions.h:16739
void _foreach_sinh_(at::TensorList self)
Definition: Functions.h:12038
at::Tensor & acos_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:497
at::Tensor special_digamma(const at::Tensor &self)
Definition: Functions.h:17053
at::Tensor & _cdist_forward_outf(const at::Tensor &x1, const at::Tensor &x2, double p, c10::optional< int64_t > compute_mode, at::Tensor &out)
Definition: Functions.h:22172
at::Tensor & empty_like_outf(const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:21182
at::Tensor _cast_Short(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:113
at::Tensor & copy_sparse_to_sparse_outf(const at::Tensor &self, const at::Tensor &src, bool non_blocking, at::Tensor &out)
Definition: Functions.h:23529
at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15115
at::Tensor _pack_padded_sequence_backward_symint(const at::Tensor &grad, c10::SymIntArrayRef input_size, const at::Tensor &batch_sizes, bool batch_first)
Definition: Functions.h:9350
at::Tensor & replication_pad2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14169
at::Tensor & fill_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &value)
Definition: Functions.h:21231
at::Tensor & reflection_pad2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13773
at::Tensor & linalg_solve_out(at::Tensor &out, const at::Tensor &A, const at::Tensor &B, bool left=true)
Definition: Functions.h:18445
at::Tensor & cosh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:1920
at::Tensor & erf_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:2938
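The paired entries for at::erf_out and at::erf_outf in this index illustrate the two out-variant orderings used throughout the header: `_out` takes the result tensor first, `_outf` takes it last, in schema order. A sketch writing into a preallocated tensor:
  #include <ATen/ATen.h>
  void erf_out_demo() {
    at::Tensor x = at::randn({3});
    at::Tensor out = at::empty_like(x);
    at::erf_out(out, x);   // out-first form
    at::erf_outf(x, out);  // out-last form; same computation, different argument order
  }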
at::Tensor & _softmax_out(at::Tensor &out, const at::Tensor &self, int64_t dim, bool half_to_float)
Definition: Functions.h:7180
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_outf(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:20314
at::Tensor reflection_pad2d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13817
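A sketch of at::reflection_pad2d above on an NCHW input; padding is given as {left, right, top, bottom}, and each pad must be smaller than the corresponding input dimension:
  #include <ATen/ATen.h>
  void reflection_pad2d_demo() {
    at::Tensor img = at::arange(9, at::kFloat).reshape({1, 1, 3, 3});
    at::Tensor padded = at::reflection_pad2d(img, {1, 1, 1, 1});  // shape {1, 1, 5, 5}
  }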
at::Tensor & to_sparse_bsr_outf(const at::Tensor &self, at::IntArrayRef blocksize, c10::optional< int64_t > dense_dim, at::Tensor &out)
Definition: Functions.h:23579
at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19501
at::Tensor linalg_multi_dot(at::TensorList tensors)
Definition: Functions.h:18566
at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16336
at::Tensor & index_select_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index)
Definition: Functions.h:10271
at::Tensor & lift_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23971
at::Tensor & leaky_relu_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &negative_slope, bool self_is_result)
Definition: Functions.h:13034
at::Tensor & upsample_nearest2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15841
at::Tensor & fft_rfft2_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17600
at::Tensor & reflection_pad3d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13916
at::Tensor & sigmoid_backward_outf(const at::Tensor &grad_output, const at::Tensor &output, at::Tensor &grad_input)
Definition: Functions.h:16351
at::Tensor & reflection_pad3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13905
at::Tensor & msort_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:11236
at::Tensor & uniform_outf(const at::Tensor &self, double from, double to, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:24184
at::Tensor gru_cell(const at::Tensor &input, const at::Tensor &hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const c10::optional< at::Tensor > &b_ih={}, const c10::optional< at::Tensor > &b_hh={})
Definition: Functions.h:9299
at::Tensor _coalesced(const at::Tensor &self, bool coalesced)
Definition: Functions.h:23520
at::Tensor _upsample_nearest_exact2d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14697
at::Tensor & ge_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10023
at::Tensor & logaddexp2_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4120
at::Tensor & linalg_solve_triangular_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &B, bool upper, bool left=true, bool unitriangular=false)
Definition: Functions.h:10471
at::Tensor & _euclidean_dist_outf(const at::Tensor &x1, const at::Tensor &x2, at::Tensor &out)
Definition: Functions.h:22163
at::Tensor miopen_convolution_add_relu(const at::Tensor &self, const at::Tensor &weight, const at::Tensor &z, const c10::optional< at::Scalar > &alpha, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:4804
at::Tensor & nanquantile_outf(const at::Tensor &self, const at::Tensor &q, c10::optional< int64_t > dim, bool keepdim, c10::string_view interpolation, at::Tensor &out)
Definition: Functions.h:11157
at::Tensor & addbmm_outf(const at::Tensor &self, const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:9824
void _foreach_zero_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24650
at::Tensor & index_put_outf(const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate, at::Tensor &out)
Definition: Functions.h:21494
at::Tensor & _sparse_sum_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim)
Definition: Functions.h:23208
at::Tensor _foobar(const at::Tensor &self, bool arg1=true, bool arg2=true, bool arg3=true)
Definition: Functions.h:19874
inline ::std::tuple< at::Tensor, at::Tensor > max_pool3d_with_indices(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:13594
at::Tensor _log_softmax_backward_data(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, at::ScalarType input_dtype)
Definition: Functions.h:4237
at::Tensor & avg_pool3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override, at::Tensor &grad_input)
Definition: Functions.h:13491
void miopen_rnn_backward_outf(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3)
Definition: Functions.h:22006
at::Tensor special_gammaln(const at::Tensor &self)
Definition: Functions.h:17067
inline ::std::vector< at::Tensor > _foreach_clamp_max(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11628
const at::Tensor & resize_as_outf(const at::Tensor &self, const at::Tensor &the_template, c10::optional< at::MemoryFormat > memory_format, const at::Tensor &out)
Definition: Functions.h:23320
at::Tensor block_diag(at::TensorList tensors)
Definition: Functions.h:1405
at::Tensor & linalg_matmul_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:17977
void unbind_copy_outf(const at::Tensor &self, int64_t dim, at::TensorList out)
Definition: Functions.h:18958
at::Tensor & new_full_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size, const at::Scalar &fill_value)
Definition: Functions.h:20961
void _foreach_addcdiv_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value=1)
Definition: Functions.h:12103
at::Tensor & _addmm_activation_outf(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta, const at::Scalar &alpha, bool use_gelu, at::Tensor &out)
Definition: Functions.h:8627
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_slogdet_outf(const at::Tensor &A, at::Tensor &sign, at::Tensor &logabsdet)
Definition: Functions.h:18024
at::Tensor & polar_out(at::Tensor &out, const at::Tensor &abs, const at::Tensor &angle)
Definition: Functions.h:1678
at::Tensor addcdiv(const at::Tensor &self, const at::Tensor &tensor1, const at::Tensor &tensor2, const at::Scalar &value=1)
Definition: Functions.h:10425
at::Tensor & upsample_bicubic2d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15247
at::Tensor reflection_pad3d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13949
at::Tensor & _log_softmax_outf(const at::Tensor &self, int64_t dim, bool half_to_float, at::Tensor &out)
Definition: Functions.h:4232
at::Tensor & nll_loss_symint_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor &out)
Definition: Functions.h:12390
inline ::std::tuple< at::Tensor, at::Tensor > _pack_padded_sequence(const at::Tensor &input, const at::Tensor &lengths, bool batch_first)
Definition: Functions.h:9334
at::Tensor & _neg_view_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25442
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _transformer_decoder_only_layer_fwd_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &src, int64_t embed_dim, int64_t num_heads, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor &norm_weight_1, const at::Tensor &norm_bias_1, const at::Tensor &norm_weight_2, const at::Tensor &norm_bias_2, const at::Tensor &ffn_weight_1, const at::Tensor &ffn_bias_1, const at::Tensor &ffn_weight_2, const at::Tensor &ffn_bias_2, const c10::optional< at::Tensor > &mask={}, const c10::optional< at::Tensor > &incr_key={}, const c10::optional< at::Tensor > &incr_value={})
Definition: Functions.h:25980
at::Tensor & rrelu_(at::Tensor &self, const at::Scalar &lower=0.125, const at::Scalar &upper=0.3333333333333333, bool training=false, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:6648
int64_t __dispatch_stride(const at::Tensor &self, int64_t dim)
Definition: Functions.h:7462
at::Tensor & random_outf(const at::Tensor &self, int64_t from, c10::optional< int64_t > to, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:24142
at::Tensor & fractional_max_pool2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &indices)
Definition: Functions.h:13515
at::Tensor _pdist_forward(const at::Tensor &self, double p=2)
Definition: Functions.h:5311
at::Tensor & special_xlog1py_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17166
at::Tensor _embedding_bag_per_sample_weights_backward(const at::Tensor &grad, const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, int64_t mode, int64_t padding_idx=-1)
Definition: Functions.h:2587
at::Tensor atan(const at::Tensor &self)
Definition: Functions.h:977
at::Tensor & empty_strided_outf(at::IntArrayRef size, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:21198
at::Tensor unsqueeze_copy(const at::Tensor &self, int64_t dim)
Definition: Functions.h:18904
at::Tensor & view_copy_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:25840
at::Tensor & floor_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:3133
at::Tensor reflection_pad3d_symint(const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13960
at::Tensor & _indices_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25757
at::Tensor & exp_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:2980
at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15192
at::Tensor & chain_matmul_outf(at::TensorList matrices, at::Tensor &out)
Definition: Functions.h:1438
at::Tensor & _transformer_encoder_layer_fwd_outf(const at::Tensor &src, int64_t embed_dim, int64_t num_heads, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor &norm_weight_1, const at::Tensor &norm_bias_1, const at::Tensor &norm_weight_2, const at::Tensor &norm_bias_2, const at::Tensor &ffn_weight_1, const at::Tensor &ffn_bias_1, const at::Tensor &ffn_weight_2, const at::Tensor &ffn_bias_2, const c10::optional< at::Tensor > &mask, c10::optional< int64_t > mask_type, at::Tensor &out)
Definition: Functions.h:25948
at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15731
at::Tensor & transpose_copy_outf(const at::Tensor &self, int64_t dim0, int64_t dim1, at::Tensor &out)
Definition: Functions.h:25743
inline ::std::tuple< at::Tensor &, at::Tensor & > min_outf(const at::Tensor &self, int64_t dim, bool keepdim, at::Tensor &min, at::Tensor &min_indices)
Definition: Functions.h:4648
at::Tensor istft(const at::Tensor &self, int64_t n_fft, c10::optional< int64_t > hop_length=c10::nullopt, c10::optional< int64_t > win_length=c10::nullopt, const c10::optional< at::Tensor > &window={}, bool center=true, bool normalized=false, c10::optional< bool > onesided=c10::nullopt, c10::optional< int64_t > length=c10::nullopt, bool return_complex=false)
Definition: Functions.h:7457
at::Tensor & index_fill_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:24043
at::Tensor & log_sigmoid_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:13057
inline ::std::tuple< at::Tensor, at::Tensor > fractional_max_pool3d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &random_samples)
Definition: Functions.h:13538
inline ::std::tuple< at::Tensor, at::Tensor > nll_loss2d_forward(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index)
Definition: Functions.h:12687
inline ::std::tuple< at::Tensor,::std::vector< at::Tensor > > histogramdd(const at::Tensor &self, at::IntArrayRef bins, c10::optional< at::ArrayRef< double > > range=c10::nullopt, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:10885
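histogramdd returns the histogram together with one bin-edge tensor per dimension; a short sketch with illustrative shapes:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor pts = at::rand({100, 2});                // 100 points in 2-D
      auto [hist, edges] = at::histogramdd(pts, {4, 4});  // hist: {4,4}; edges: two tensors of size 5
      return 0;
    }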
at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16061
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_group_norm_backward(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:3481
at::Tensor expm1(const at::Tensor &self)
Definition: Functions.h:3004
at::Tensor & index_reduce_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source, c10::string_view reduce, bool include_self=true)
Definition: Functions.h:9472
at::Tensor alias(const at::Tensor &self)
Definition: Functions.h:11563
at::Tensor & dequantize_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23669
at::Tensor & special_ndtr_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17142
at::Tensor & rad2deg_(at::Tensor &self)
Definition: Functions.h:5396
at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong)
Definition: Functions.h:9890
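tril_indices packs the lower-triangle coordinates into a 2 x N tensor (long dtype by default, per the options argument above); for example:

    #include <ATen/ATen.h>

    int main() {
      // shape {2, 6}: rows 0,1,1,2,2,2 and cols 0,0,1,0,1,2
      at::Tensor idx = at::tril_indices(3, 3);
      return 0;
    }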
at::Tensor & geometric_out(at::Tensor &out, const at::Tensor &self, double p, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24236
at::Tensor & view_as_complex_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25420
at::Tensor & sum_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:7491
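sum_out/sum_outf illustrate the naming convention used throughout this header: the _out form takes the destination tensor first, while the _outf form takes the same arguments in schema order with the destination last and no defaulted parameters. A minimal sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({4, 5});
      at::Tensor out = at::empty({5});
      at::sum_out(out, x, /*dim=*/{0});           // destination-first variant
      at::sum_outf(x, /*dim=*/{0}, /*keepdim=*/false,
                   /*dtype=*/c10::nullopt, out);  // schema-order variant, same result
      return 0;
    }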
at::Tensor & special_expm1_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17016
at::Tensor & _softmax_backward_data_outf(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, at::ScalarType input_dtype, at::Tensor &grad_input)
Definition: Functions.h:7198
at::Tensor miopen_depthwise_convolution_symint(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4788
at::Tensor & special_bessel_y0_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19249
at::Tensor & gelu_outf(const at::Tensor &self, c10::string_view approximate, at::Tensor &out)
Definition: Functions.h:6692
at::Tensor & true_divide_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2359
at::Tensor _reshape_alias_symint(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride)
Definition: Functions.h:6589
at::Tensor & fft_fftfreq_outf(int64_t n, double d, at::Tensor &out)
Definition: Functions.h:17748
at::Tensor & sigmoid_(at::Tensor &self)
Definition: Functions.h:6926
inline ::std::tuple< at::Tensor &, at::Tensor & > mode_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t dim=-1, bool keepdim=false)
Definition: Functions.h:4853
void _foreach_erf_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24732
at::Tensor & avg_pool2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override)
Definition: Functions.h:13459
void _foreach_erfc_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24745
at::Tensor var(const at::Tensor &self, bool unbiased)
Definition: Functions.h:7991
at::Tensor & randint_like_out(at::Tensor &out, const at::Tensor &self, int64_t high, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:22346
at::Tensor & upsample_nearest2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15951
at::Tensor & pixel_unshuffle_out(at::Tensor &out, const at::Tensor &self, int64_t downscale_factor)
Definition: Functions.h:22213
at::Tensor avg_pool2d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override)
Definition: Functions.h:13468
at::Tensor special_modified_bessel_k0(const at::Tensor &self)
Definition: Functions.h:19636
at::Tensor & atan_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:991
at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15808
inline ::std::tuple< at::Tensor &, at::Tensor & > geqrf_outf(const at::Tensor &self, at::Tensor &a, at::Tensor &tau)
Definition: Functions.h:10579
at::Tensor & unfold_copy_outf(const at::Tensor &self, int64_t dimension, int64_t size, int64_t step, at::Tensor &out)
Definition: Functions.h:25886
inline ::std::vector< at::Tensor > align_tensors(at::TensorList tensors)
Definition: Functions.h:143
at::Tensor hinge_embedding_loss(const at::Tensor &self, const at::Tensor &target, double margin=1.0, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:3449
at::Tensor & nll_loss_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight, at::Tensor &grad_input)
Definition: Functions.h:12522
at::Tensor & _embedding_bag_dense_backward_outf(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx, at::Tensor &out)
Definition: Functions.h:20800
at::Tensor & reflection_pad1d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13729
at::Tensor upsample_trilinear3d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15533
at::Tensor & _standard_gamma_grad_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &output)
Definition: Functions.h:23136
at::Tensor addbmm(const at::Tensor &self, const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:9829
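addbmm reduces over the batch dimension: out = beta * self + alpha * sum_i batch1[i] @ batch2[i]. A short sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor acc = at::zeros({2, 4});
      at::Tensor b1 = at::randn({5, 2, 3});
      at::Tensor b2 = at::randn({5, 3, 4});
      at::Tensor r = at::addbmm(acc, b1, b2);  // {2, 4}: the five batch products are summed
      return 0;
    }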
at::Tensor & leaky_relu_(at::Tensor &self, const at::Scalar &negative_slope=0.01)
Definition: Functions.h:13048
at::Tensor & isneginf_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:16954
at::Tensor & special_airy_ai_out(at::Tensor &out, const at::Tensor &x)
Definition: Functions.h:19197
at::Tensor & channel_shuffle_out(at::Tensor &out, const at::Tensor &self, int64_t groups)
Definition: Functions.h:22222
inline ::std::tuple< at::Tensor, at::Tensor > batch_norm_gather_stats_with_counts(const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum, double eps, const at::Tensor &counts)
Definition: Functions.h:5118
at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:13208
at::Tensor _test_optional_filled_intlist(const at::Tensor &values, at::OptionalIntArrayRef addends)
Definition: Functions.h:18595
at::Tensor replication_pad3d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14411
at::Tensor & special_i0e_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17300
void _foreach_floor_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24763
at::Tensor & _nested_tensor_size_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:22952
at::Tensor slow_conv_transpose2d_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:16444
at::Tensor & _nested_tensor_strides_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:22961
at::Tensor miopen_depthwise_convolution(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4777
at::Tensor & nextafter_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10970
at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:13298
at::Tensor & as_strided_scatter_outf(const at::Tensor &self, const at::Tensor &src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset, at::Tensor &out)
Definition: Functions.h:22737
at::Tensor upsample_bilinear2d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15016
at::Tensor sgn(const at::Tensor &self)
Definition: Functions.h:410
at::Tensor & logaddexp2_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:4124
at::Tensor & view_copy_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:25862
at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14136
at::Tensor & _copy_from_and_resize_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &dst)
Definition: Functions.h:20436
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _linalg_det(const at::Tensor &A)
Definition: Functions.h:17879
at::Tensor negative(const at::Tensor &self)
Definition: Functions.h:6469
at::Tensor fft_rfft2(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17595
at::Tensor & full_like_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &fill_value, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21258
at::Tensor & _embedding_bag_per_sample_weights_backward_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, int64_t mode, int64_t padding_idx=-1)
Definition: Functions.h:20833
at::Tensor & _conj_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25433
at::Tensor & _test_optional_intlist_outf(const at::Tensor &values, at::OptionalIntArrayRef addends, at::Tensor &out)
Definition: Functions.h:25316
at::Tensor & multi_margin_loss_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const at::Scalar &p, const at::Scalar &margin, const c10::optional< at::Tensor > &weight, int64_t reduction, at::Tensor &grad_input)
Definition: Functions.h:12305
at::Tensor __and__(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9638
at::Tensor & clip_outf(const at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max, at::Tensor &out)
Definition: Functions.h:1640
at::Tensor int_repr(const at::Tensor &self)
Definition: Functions.h:9045
inline ::std::tuple< at::Tensor &, at::Tensor & > grid_sampler_3d_backward_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array< bool, 2 > output_mask)
Definition: Functions.h:21312
void unsafe_split_symint_out(at::TensorList out, const at::Tensor &self, c10::SymInt split_size, int64_t dim=0)
Definition: Functions.h:22792
at::Tensor & _sparse_log_softmax_out(at::Tensor &out, const at::Tensor &self, int64_t dim, bool half_to_float)
Definition: Functions.h:23262
at::Tensor & affine_grid_generator_outf(const at::Tensor &theta, at::IntArrayRef size, bool align_corners, at::Tensor &out)
Definition: Functions.h:20115
at::Tensor replication_pad3d_symint(const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14356
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_layer_norm(const at::Tensor &input, at::IntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, double eps)
Definition: Functions.h:3851
at::Tensor & replication_pad2d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14180
at::Tensor & linalg_eigvalsh_out(at::Tensor &out, const at::Tensor &self, c10::string_view UPLO="L")
Definition: Functions.h:18109
at::Tensor & signbit_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10762
ArrayRef< Tensor > TensorList
Definition: TensorBody.h:68
void unbind_copy_out(at::TensorList out, const at::Tensor &self, int64_t dim=0)
Definition: Functions.h:18954
at::Tensor _test_check_tensor(const at::Tensor &self)
Definition: Functions.h:637
at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14752
at::Tensor & reflection_pad2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13839
at::Tensor mkldnn_max_pool3d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4501
inline ::std::tuple< at::Tensor &, at::Tensor & > _ctc_loss_outf(const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:20575
void _foreach_addcmul_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value, at::TensorList out)
Definition: Functions.h:24916
at::Tensor & repeat_interleave_outf(const at::Tensor &repeats, c10::optional< int64_t > output_size, at::Tensor &out)
Definition: Functions.h:22509
int64_t _debug_has_internal_overlap(const at::Tensor &self)
Definition: Functions.h:258
const at::Tensor & fft_ihfft2_out(const at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17642
at::Tensor _sparse_broadcast_to(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:1316
at::Tensor replication_pad1d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14081
at::Tensor & smooth_l1_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean, double beta=1.0)
Definition: Functions.h:12775
at::Tensor & upsample_bicubic2d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15258
at::Tensor bmm(const at::Tensor &self, const at::Tensor &mat2)
Definition: Functions.h:1275
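bmm, unlike addbmm above, keeps the batch dimension; a sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::randn({10, 2, 3});
      at::Tensor b = at::randn({10, 3, 4});
      at::Tensor c = at::bmm(a, b);  // {10, 2, 4}: one matmul per batch entry
      return 0;
    }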
at::Tensor acosh(const at::Tensor &self)
Definition: Functions.h:781
at::Tensor & avg_pool3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional< int64_t > divisor_override=c10::nullopt)
Definition: Functions.h:13473
at::Tensor remainder(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10993
at::Tensor & detach_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25654
at::Tensor logical_xor(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1215
inline ::std::vector< at::Tensor > split_copy(const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:18835
at::Tensor & atan2_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:10776
at::Tensor & cosh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:1916
inline ::std::vector< at::Tensor > _foreach_neg(at::TensorList self)
Definition: Functions.h:11993
bool _chunk_grad_outputs_efficient_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, bool is_causal=false)
Definition: Functions.h:19157
void _foreach_acos_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24678
const at::Tensor & sparse_resize_and_clear_outf(const at::Tensor &self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor &out)
Definition: Functions.h:23474
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _transformer_decoder_only_layer_fwd(const at::Tensor &src, int64_t embed_dim, int64_t num_heads, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor &norm_weight_1, const at::Tensor &norm_bias_1, const at::Tensor &norm_weight_2, const at::Tensor &norm_bias_2, const at::Tensor &ffn_weight_1, const at::Tensor &ffn_bias_1, const at::Tensor &ffn_weight_2, const at::Tensor &ffn_bias_2, const c10::optional< at::Tensor > &mask={}, const c10::optional< at::Tensor > &incr_key={}, const c10::optional< at::Tensor > &incr_value={})
Definition: Functions.h:19206
at::Tensor _test_autograd_multiple_dispatch_view(const at::Tensor &self)
Definition: Functions.h:18635
at::Tensor im2col(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16916
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > mkldnn_rnn_layer_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, at::Tensor &out5, at::Tensor &out6, const at::Tensor &input, const at::Tensor &weight1, const at::Tensor &weight2, const at::Tensor &weight3, const at::Tensor &weight4, const at::Tensor &hx_, const at::Tensor &cx_tmp, const at::Tensor &output, const at::Tensor &hy_, const at::Tensor &cy_, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor &workspace)
Definition: Functions.h:21834
at::Tensor & special_gammainc_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17436
at::Tensor & upsample_trilinear3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15423
at::Tensor & convolution_symint_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor &out)
Definition: Functions.h:20292
at::Tensor & atan2_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10772
at::Tensor & leaky_relu_outf(const at::Tensor &self, const at::Scalar &negative_slope, at::Tensor &out)
Definition: Functions.h:13024
at::Tensor & _cdist_forward_out(at::Tensor &out, const at::Tensor &x1, const at::Tensor &x2, double p, c10::optional< int64_t > compute_mode)
Definition: Functions.h:22168
at::Tensor & cumprod_outf(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:2090
at::Tensor & stack_out(at::Tensor &out, at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:7382
at::Tensor & addmv_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat, const at::Tensor &vec, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:594
inline ::std::tuple< at::Tensor, at::Tensor > nll_loss_forward_symint(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index)
Definition: Functions.h:12500
at::Tensor mse_loss(const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12263
at::Tensor & special_psi_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17048
at::Tensor conv_transpose2d(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1)
Definition: Functions.h:1862
at::Tensor & clamp_max_(at::Tensor &self, const at::Scalar &max)
Definition: Functions.h:1550
at::Tensor & linalg_vecdot_outf(const at::Tensor &x, const at::Tensor &y, int64_t dim, at::Tensor &out)
Definition: Functions.h:17991
void _foreach_erf_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24736
at::Tensor & quantize_per_tensor_outf(const at::Tensor &self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor &out)
Definition: Functions.h:23633
at::Tensor _test_warn_in_autograd(const at::Tensor &self)
Definition: Functions.h:18620
at::Tensor lgamma(const at::Tensor &self)
Definition: Functions.h:10673
void _foreach_mul_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24439
inline ::std::tuple< at::Tensor, at::Tensor > quantized_lstm_cell(const at::Tensor &input, at::TensorList hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const at::Tensor &b_ih, const at::Tensor &b_hh, const at::Tensor &packed_ih, const at::Tensor &packed_hh, const at::Tensor &col_offsets_ih, const at::Tensor &col_offsets_hh, const at::Scalar &scale_ih, const at::Scalar &scale_hh, const at::Scalar &zero_point_ih, const at::Scalar &zero_point_hh)
Definition: Functions.h:9314
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_group_norm_symint(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps)
Definition: Functions.h:3470
at::Tensor & greater_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10159
at::Tensor & _copy_from_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &dst, bool non_blocking=false)
Definition: Functions.h:20427
void _foreach_log2_(at::TensorList self)
Definition: Functions.h:11988
inline ::std::tuple< at::Tensor, at::Tensor > adaptive_max_pool2d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13398
at::Tensor & _mkldnn_reshape_outf(const at::Tensor &self, at::IntArrayRef shape, at::Tensor &out)
Definition: Functions.h:22518
at::Tensor eq(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10000
at::Tensor & bitwise_and_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:9605
at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor &ccol_indices, const at::Tensor &row_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8763
inline ::std::tuple< at::Tensor &, at::Tensor & > topk_outf(const at::Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:11265
at::Tensor & blackman_window_outf(int64_t window_length, at::Tensor &out)
Definition: Functions.h:20192
at::Tensor instance_norm(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled)
Definition: Functions.h:3670
at::Tensor nll_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12555
at::Tensor le(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10084
at::Tensor & adaptive_avg_pool2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13175
at::Tensor conj_physical(const at::Tensor &self)
Definition: Functions.h:449
at::Tensor & upsample_trilinear3d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15445
at::Tensor copy_sparse_to_sparse(const at::Tensor &self, const at::Tensor &src, bool non_blocking=false)
Definition: Functions.h:23534
at::Tensor & _embedding_bag_dense_backward_symint_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:20811
at::Tensor mkldnn_reorder_conv2d_weight(const at::Tensor &self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt)
Definition: Functions.h:8970
at::Tensor adaptive_max_pool2d_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &indices)
Definition: Functions.h:13412
at::Tensor & unsqueeze_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim)
Definition: Functions.h:25748
at::Tensor _logcumsumexp(const at::Tensor &self, int64_t dim)
Definition: Functions.h:4251
inline ::std::tuple< at::Tensor &, at::Tensor & > grid_sampler_2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array< bool, 2 > output_mask, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:21289
at::Tensor & to_sparse_bsr_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef blocksize, c10::optional< int64_t > dense_dim=c10::nullopt)
Definition: Functions.h:23575
inline ::std::vector< at::Tensor > _foreach_div(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11608
at::Tensor & arcsin_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:968
at::Tensor & lcm_(at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3328
at::Tensor & _efficientzerotensor_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:23118
at::Tensor & linspace_outf(const at::Scalar &start, const at::Scalar &end, int64_t steps, at::Tensor &out)
Definition: Functions.h:4025
at::Tensor & addr_outf(const at::Tensor &self, const at::Tensor &vec1, const at::Tensor &vec2, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:612
at::Tensor & masked_scatter_outf(const at::Tensor &self, const at::Tensor &mask, const at::Tensor &source, at::Tensor &out)
Definition: Functions.h:24007
at::Tensor & slice_backward_out(at::Tensor &out, const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step)
Definition: Functions.h:22585
at::Tensor & special_i1e_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17324
at::Tensor pdist(const at::Tensor &self, double p=2)
Definition: Functions.h:5306
at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &packed, const at::Tensor &col_offsets, const at::Scalar &weight_scale, const at::Scalar &weight_zero_point, const at::Tensor &bias)
Definition: Functions.h:3953
void _foreach_reciprocal_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24876
at::Tensor quantized_max_pool2d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4516
at::Tensor & multilabel_margin_loss_outf(const at::Tensor &self, const at::Tensor &target, int64_t reduction, at::Tensor &out)
Definition: Functions.h:12319
at::Tensor & arcsinh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:848
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _scaled_dot_product_efficient_attention_backward(const at::Tensor &grad_out_, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const at::Tensor &out, const at::Tensor &logsumexp, bool is_causal=false, bool chunk_grad_outputs=false)
Definition: Functions.h:19152
inline ::std::tuple< at::Tensor &, at::Tensor & > fake_quantize_per_tensor_affine_cachemask_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:23728
inline ::std::tuple< at::Tensor &, at::Tensor & > log_sigmoid_forward_out(at::Tensor &output, at::Tensor &buffer, const at::Tensor &self)
Definition: Functions.h:13067
void _foreach_round_(at::TensorList self)
Definition: Functions.h:12048
at::Tensor & dot_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &tensor)
Definition: Functions.h:2378
at::Tensor & fix_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:7924
at::Tensor & embedding_renorm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &indices, double max_norm, double norm_type)
Definition: Functions.h:20757
at::Tensor & multi_margin_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const at::Scalar &p=1, const at::Scalar &margin=1, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12287
at::Tensor & elu_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Scalar &alpha, const at::Scalar &scale, const at::Scalar &input_scale, bool is_result, const at::Tensor &self_or_result)
Definition: Functions.h:12873
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor,::std::vector< at::Tensor > > _cudnn_rnn_backward(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:227
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > miopen_batch_norm(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double exponential_average_factor, double epsilon)
Definition: Functions.h:4723
at::Tensor & expm1_(at::Tensor &self)
Definition: Functions.h:3009
at::Tensor & mkldnn_convolution_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:21781
at::Tensor & count_nonzero_outf(const at::Tensor &self, at::IntArrayRef dim, at::Tensor &out)
Definition: Functions.h:20449
at::Tensor nonzero(const at::Tensor &self)
Definition: Functions.h:10349
inline ::std::tuple< at::Tensor, at::Tensor > _prelu_kernel_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &weight)
Definition: Functions.h:6683
at::Tensor & _nested_from_padded_out(at::Tensor &out, const at::Tensor &padded, const at::Tensor &cpu_nested_shape_example, bool fuse_transform_0213=false)
Definition: Functions.h:22939
at::Tensor & linalg_solve_outf(const at::Tensor &A, const at::Tensor &B, bool left, at::Tensor &out)
Definition: Functions.h:18449
at::Tensor & special_entr_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:16978
at::Tensor & fft_fft_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17488
at::Tensor deg2rad(const at::Tensor &self)
Definition: Functions.h:5410
at::Tensor repeat_interleave_symint(const at::Tensor &self, c10::SymInt repeats, c10::optional< int64_t > dim=c10::nullopt, c10::optional< int64_t > output_size=c10::nullopt)
Definition: Functions.h:6523
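_symint variants such as this one take c10::SymInt / c10::SymIntArrayRef instead of plain integers so that symbolic shapes can flow through tracing; concrete int64_t values convert implicitly. A sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::arange(1, 4);                                 // 1, 2, 3
      at::Tensor r = at::repeat_interleave_symint(t, c10::SymInt(2));  // 1,1,2,2,3,3
      return 0;
    }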
at::Tensor segment_reduce(const at::Tensor &data, c10::string_view reduce, const c10::optional< at::Tensor > &lengths={}, const c10::optional< at::Tensor > &indices={}, const c10::optional< at::Tensor > &offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional< at::Scalar > &initial=c10::nullopt)
Definition: Functions.h:18645
at::Tensor & linalg_eigvalsh_outf(const at::Tensor &self, c10::string_view UPLO, at::Tensor &out)
Definition: Functions.h:18113
at::Tensor cholesky(const at::Tensor &self, bool upper=false)
Definition: Functions.h:10523
at::Tensor & as_strided_copy_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset, at::Tensor &out)
Definition: Functions.h:25480
inline ::std::tuple< at::Tensor, at::Tensor > topk(const at::Tensor &self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true)
Definition: Functions.h:11270
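topk returns a (values, indices) pair; a sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor scores = at::randn({8});
      auto [values, indices] = at::topk(scores, /*k=*/3);  // three largest, sorted descending
      return 0;
    }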
at::Tensor & _test_warn_in_autograd_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25339
at::Tensor select_scatter_symint(const at::Tensor &self, const at::Tensor &src, int64_t dim, c10::SymInt index)
Definition: Functions.h:7113
at::Tensor silu(const at::Tensor &self)
Definition: Functions.h:6864
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_out, const at::Tensor &input, at::IntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21566
inline ::std::tuple< at::Tensor &, at::Tensor & > cummin_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t dim)
Definition: Functions.h:2048
at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19818
at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:16160
at::Tensor & index_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source)
Definition: Functions.h:3636
at::Tensor & _upsample_nearest_exact2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15896
at::Tensor & tanh_(at::Tensor &self)
Definition: Functions.h:7710
at::Tensor logical_not(const at::Tensor &self)
Definition: Functions.h:1201
at::Tensor & _transformer_encoder_layer_fwd_out(at::Tensor &out, const at::Tensor &src, int64_t embed_dim, int64_t num_heads, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor &norm_weight_1, const at::Tensor &norm_bias_1, const at::Tensor &norm_weight_2, const at::Tensor &norm_bias_2, const at::Tensor &ffn_weight_1, const at::Tensor &ffn_bias_1, const at::Tensor &ffn_weight_2, const at::Tensor &ffn_bias_2, const c10::optional< at::Tensor > &mask={}, c10::optional< int64_t > mask_type=c10::nullopt)
Definition: Functions.h:25944
at::Tensor & special_round_out(at::Tensor &out, const at::Tensor &self, int64_t decimals=0)
Definition: Functions.h:17408
inline ::std::vector< at::Tensor > broadcast_tensors(at::TensorList tensors)
Definition: Functions.h:1289
at::Tensor & lt_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10187
at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16226
at::Tensor & sinh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:7011
at::Tensor _neg_view(const at::Tensor &self)
Definition: Functions.h:478
at::Tensor & sum_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7487
at::Tensor & _unsafe_view_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:23071
at::Tensor & upsample_nearest2d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15830
at::Tensor & fft_fft_outf(const at::Tensor &self, c10::optional< int64_t > n, int64_t dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17492
at::Tensor zeros_like(const at::Tensor &self, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:8242
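The at::TensorOptions parameter on factory-style entries such as zeros_like overrides dtype/device/layout while keeping the source shape; a sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor src = at::randn({3, 3});
      at::Tensor z = at::zeros_like(src, src.options().dtype(at::kDouble));
      return 0;
    }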
at::Tensor & nll_loss2d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12731
at::Tensor & miopen_depthwise_convolution_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21949
at::Tensor & upsample_nearest3d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16237
at::Tensor & conj_physical_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:454
void _foreach_maximum_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11643
at::Tensor & i0_(at::Tensor &self)
Definition: Functions.h:10725
at::Tensor unsqueeze(const at::Tensor &self, int64_t dim)
Definition: Functions.h:7981
void _foreach_log2_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24795
inline ::std::tuple< at::Tensor, at::Tensor > _scaled_dot_product_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const c10::optional< at::Tensor > &attn_mask={}, double dropout_p=0.0, bool need_attn_weights=false, bool is_causal=false)
Definition: Functions.h:19122
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > lu_unpack(const at::Tensor &LU_data, const at::Tensor &LU_pivots, bool unpack_data=true, bool unpack_pivots=true)
Definition: Functions.h:10636
at::Tensor ldexp(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3993
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_inv_ex_out(at::Tensor &inverse, at::Tensor &info, const at::Tensor &A, bool check_errors=false)
Definition: Functions.h:18137
at::Tensor special_polygamma(int64_t n, const at::Tensor &self)
Definition: Functions.h:17347
at::Tensor & reflection_pad3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13971
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > _linalg_slogdet(const at::Tensor &A)
Definition: Functions.h:18001
at::Tensor _copy_from(const at::Tensor &self, const at::Tensor &dst, bool non_blocking=false)
Definition: Functions.h:1877
inline ::std::tuple< at::Tensor &, at::Tensor & > fractional_max_pool3d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &random_samples, at::Tensor &output, at::Tensor &indices)
Definition: Functions.h:13533
at::Tensor conv_transpose1d(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1)
Definition: Functions.h:1857
at::Tensor mish_backward(const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:6916
at::Tensor & take_along_dim_outf(const at::Tensor &self, const at::Tensor &indices, c10::optional< int64_t > dim, at::Tensor &out)
Definition: Functions.h:10261
at::Tensor & to_padded_tensor_symint_out(at::Tensor &out, const at::Tensor &self, double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt)
Definition: Functions.h:25922
at::Tensor & _slow_conv2d_forward_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &output)
Definition: Functions.h:16539
at::Tensor & isnan_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:21513
at::Tensor & reflection_pad1d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13652
at::Tensor & special_expm1_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17020
at::Tensor maximum(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11055
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > _embedding_bag_forward_only(const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional< at::Tensor > &per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1)
Definition: Functions.h:2482
at::Tensor binary_cross_entropy_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:1135
at::Tensor & slice_scatter_symint_outf(const at::Tensor &self, const at::Tensor &src, int64_t dim, c10::optional< c10::SymInt > start, c10::optional< c10::SymInt > end, c10::SymInt step, at::Tensor &out)
Definition: Functions.h:22662
at::Tensor arccos(const at::Tensor &self)
Definition: Functions.h:502
void miopen_rnn_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::TensorList out3, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:22002
at::Tensor _test_autograd_multiple_dispatch_view_copy(const at::Tensor &self)
Definition: Functions.h:18640
at::Tensor & special_xlogy_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:17212
at::Tensor & subtract_outf(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:8541
at::Tensor & randint_out(at::Tensor &out, int64_t high, at::IntArrayRef size)
Definition: Functions.h:5887
at::Tensor convolution(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups)
Definition: Functions.h:1709
at::Tensor native_channel_shuffle(const at::Tensor &self, int64_t groups)
Definition: Functions.h:5371
at::Tensor normal_functional(const at::Tensor &self, double mean=0, double std=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:11428
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _native_decoder_only_multi_head_attention_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask={}, const c10::optional< at::Tensor > &incr_key={}, const c10::optional< at::Tensor > &incr_value={}, bool need_weights=true, bool average_attn_weights=true)
Definition: Functions.h:25989
inline ::std::vector< at::Tensor > _foreach_add(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11578
at::Tensor randn_like(const at::Tensor &self, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:6345
at::Tensor fft_rfftn(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17679
at::Tensor & _index_put_impl_out(at::Tensor &out, const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate=false, bool unsafe=false)
Definition: Functions.h:21499
at::Tensor & bartlett_window_out(at::Tensor &out, int64_t window_length)
Definition: Functions.h:20120
at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13861
at::Tensor tensordot(const at::Tensor &self, const at::Tensor &other, at::IntArrayRef dims_self, at::IntArrayRef dims_other)
Definition: Functions.h:7724
void _foreach_log10_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24777
at::Tensor & special_scaled_modified_bessel_k1_out(at::Tensor &out, const at::Tensor &x)
Definition: Functions.h:19683
at::Tensor & _make_per_tensor_quantized_tensor_outf(const at::Tensor &self, double scale, int64_t zero_point, at::Tensor &out)
Definition: Functions.h:23714
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _fake_quantize_learnable_per_tensor_affine_backward(const at::Tensor &grad, const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0)
Definition: Functions.h:9090
at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor &out)
Definition: Functions.h:21379
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _linalg_svd_outf(const at::Tensor &A, bool full_matrices, bool compute_uv, c10::optional< c10::string_view > driver, at::Tensor &U, at::Tensor &S, at::Tensor &Vh)
Definition: Functions.h:18295
at::Tensor fft_fft(const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17483
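fft_fft mirrors torch.fft.fft; a round-trip sketch (at::fft_ifft, the matching inverse, is assumed available as in the Python API):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor sig = at::randn({16});
      at::Tensor spec = at::fft_fft(sig);    // complex output, length 16
      at::Tensor back = at::fft_ifft(spec);  // approximately sig, complex dtype
      return 0;
    }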
at::Tensor _nested_view_from_buffer(const at::Tensor &self, const at::Tensor &nested_size, const at::Tensor &nested_strides, at::IntArrayRef offsets)
Definition: Functions.h:7871
at::Tensor & repeat_symint_outf(const at::Tensor &self, c10::SymIntArrayRef repeats, at::Tensor &out)
Definition: Functions.h:22494
at::Tensor & frac_(at::Tensor &self)
Definition: Functions.h:3166
inline ::std::vector< at::Tensor > tensor_split(const at::Tensor &self, int64_t sections, int64_t dim=0)
Definition: Functions.h:1453
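tensor_split spreads any remainder over the leading chunks instead of erroring on uneven sizes; a sketch:

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      at::Tensor t = at::arange(10);
      std::vector<at::Tensor> parts = at::tensor_split(t, 3);  // sizes 4, 3, 3
      return 0;
    }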
at::Tensor _adaptive_avg_pool3d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13353
at::Tensor & special_gammaincc_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17450
at::Tensor & hardsigmoid_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:12934
at::Tensor & arcsinh_(at::Tensor &self)
Definition: Functions.h:843
inline ::std::tuple< at::Tensor, at::Tensor > fake_quantize_per_channel_affine_cachemask(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:9100
at::Tensor slow_conv_transpose3d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16499
at::Tensor & arctanh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:890
at::Tensor & fft_rfft2_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17604
at::Tensor & embedding_renorm_(at::Tensor &self, const at::Tensor &indices, double max_norm, double norm_type)
Definition: Functions.h:2472
at::Tensor & _upsample_bilinear2d_aa_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15049
at::Tensor & take_along_dim_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &indices, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:10257
at::Tensor & clamp_outf(const at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max, at::Tensor &out)
Definition: Functions.h:1526
at::Tensor binary_cross_entropy(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:1121
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > mkldnn_rnn_layer_outf(const at::Tensor &input, const at::Tensor &weight0, const at::Tensor &weight1, const at::Tensor &weight2, const at::Tensor &weight3, const at::Tensor &hx_, const at::Tensor &cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3)
Definition: Functions.h:21829
at::Tensor & special_scaled_modified_bessel_k1_outf(const at::Tensor &x, at::Tensor &out)
Definition: Functions.h:19687
at::Tensor & mkldnn_linear_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={})
Definition: Functions.h:21619
Tensor conj(const Tensor &tensor)
Definition: Functions.h:26086
at::Tensor & special_laguerre_polynomial_l_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19539
at::Tensor fft_ifftshift(const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt)
Definition: Functions.h:17776
at::Tensor & binary_cross_entropy_with_logits_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, const c10::optional< at::Tensor > &pos_weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:20170
at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25058
at::Tensor & _fake_quantize_learnable_per_channel_affine_outf(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor &out)
Definition: Functions.h:23768
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_ldl_factor_ex_out(at::Tensor &LD, at::Tensor &pivots, at::Tensor &info, const at::Tensor &self, bool hermitian=false, bool check_errors=false)
Definition: Functions.h:17917
at::Tensor isposinf(const at::Tensor &self)
Definition: Functions.h:16931
at::Tensor absolute(const at::Tensor &self)
Definition: Functions.h:372
inline ::std::vector< at::Tensor > hsplit(const at::Tensor &self, int64_t sections)
Definition: Functions.h:7313
at::Tensor _cast_Float(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:98
at::Tensor & _cudnn_rnn_flatten_weight_outf(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor &out)
Definition: Functions.h:19918
at::Tensor & renorm_outf(const at::Tensor &self, const at::Scalar &p, int64_t dim, const at::Scalar &maxnorm, at::Tensor &out)
Definition: Functions.h:11307
at::Tensor & lgamma_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10668
void _foreach_sinh_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24844
at::Tensor & bitwise_right_shift_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:9792
at::Tensor & miopen_convolution_symint_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21894
at::Tensor & _adaptive_avg_pool2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:25069
void _foreach_sqrt_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24660
at::Tensor & glu_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, int64_t dim)
Definition: Functions.h:12906
at::Tensor not_equal(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9972
at::Tensor & logical_not_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:1206
void _foreach_mul_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24443
int64_t numel(const Tensor &tensor)
Definition: Functions.h:26050
at::Tensor & miopen_convolution_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21872
at::Tensor & convolution_overrideable_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups)
Definition: Functions.h:20347
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_outf(const at::Tensor &input, at::IntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, double eps, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21533
void _foreach_frac_(at::TensorList self)
Definition: Functions.h:12068
at::Tensor & logspace_outf(const at::Scalar &start, const at::Scalar &end, int64_t steps, double base, at::Tensor &out)
Definition: Functions.h:4199
at::Tensor scatter_reduce(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src, c10::string_view reduce, bool include_self=true)
Definition: Functions.h:9591
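A hedged sketch of at::scatter_reduce with reduce="sum" (values illustrative; same include as above):

// Sum src values into self at the positions given by index; include_self=true keeps self's own values in the reduction.
at::Tensor dst   = at::zeros({3});
at::Tensor index = at::tensor({0, 1, 0}, at::kLong);
at::Tensor src   = at::tensor({1.0, 2.0, 3.0});
at::Tensor out   = at::scatter_reduce(dst, /*dim=*/0, index, src, "sum", /*include_self=*/true);
// out == {4.0, 2.0, 0.0}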
at::Tensor bernoulli(const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:1097
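An illustrative sketch of at::bernoulli (not from the header):

// Draw one 0/1 sample per element, with success probability taken elementwise from p.
at::Tensor p = at::full({4}, 0.5);
at::Tensor draws = at::bernoulli(p);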
at::Tensor clamp_max(const at::Tensor &self, const at::Scalar &max)
Definition: Functions.h:1540
at::Tensor & miopen_convolution_transpose_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21916
at::Tensor & softshrink_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &lambd)
Definition: Functions.h:13161
inline ::std::tuple< at::Tensor, at::Tensor > cummax(const at::Tensor &self, int64_t dim)
Definition: Functions.h:2010
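A short sketch of at::cummax's tuple return (values illustrative):

// Running maximum along dim 0, plus the index at which each running max was attained.
at::Tensor t = at::tensor({1.0, 3.0, 2.0});
auto [values, indices] = at::cummax(t, 0); // values == {1, 3, 3}, indices == {0, 1, 1}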
at::Tensor & upsample_bilinear2d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:14983
at::Tensor & view_as_real_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25411
at::Tensor & embedding_dense_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq)
Definition: Functions.h:20713
at::Tensor addmm(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:8618
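A minimal sketch of at::addmm (illustrative shapes):

// y = beta * bias + alpha * (a @ b); with the default beta = alpha = 1 this is bias + a.mm(b).
at::Tensor bias = at::zeros({2, 4});
at::Tensor a = at::randn({2, 3});
at::Tensor b = at::randn({3, 4});
at::Tensor y = at::addmm(bias, a, b);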
at::Tensor max_pool3d_with_indices_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor &indices)
Definition: Functions.h:13608
at::Tensor & multi_margin_loss_outf(const at::Tensor &self, const at::Tensor &target, const at::Scalar &p, const at::Scalar &margin, const c10::optional< at::Tensor > &weight, int64_t reduction, at::Tensor &out)
Definition: Functions.h:12291
at::Tensor & _ctc_loss_backward_outf(const at::Tensor &grad, const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor &neg_log_likelihood, const at::Tensor &log_alpha, int64_t blank, bool zero_infinity, at::Tensor &out)
Definition: Functions.h:20593
at::Tensor & amax_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim={}, bool keepdim=false)
Definition: Functions.h:4462
at::Tensor _softmax_backward_data(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, at::ScalarType input_dtype)
Definition: Functions.h:7189
void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out)
Definition: Functions.h:24970
at::Tensor & tensordot_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, at::IntArrayRef dims_self, at::IntArrayRef dims_other)
Definition: Functions.h:7729
at::Tensor & ones_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:5228
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > batch_norm_backward_reduce(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &weight, bool input_g, bool weight_g, bool bias_g)
Definition: Functions.h:5128
at::Tensor i0(const at::Tensor &self)
Definition: Functions.h:10720
at::Tensor & maximum_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:11064
at::Tensor copysign(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1182
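A brief sketch of at::copysign (values illustrative):

// Magnitude from the first argument, sign from the second.
at::Tensor mag = at::tensor({1.0, 2.0});
at::Tensor sgn = at::tensor({-1.0, 1.0});
at::Tensor z = at::copysign(mag, sgn); // {-1.0, 2.0}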
at::Tensor & constant_pad_nd_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef pad, const at::Scalar &value=0)
Definition: Functions.h:20237
at::Tensor diagonal_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2)
Definition: Functions.h:2218
at::Tensor col2im(const at::Tensor &self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16871
at::Tensor & from_file_outf(c10::string_view filename, c10::optional< bool > shared, c10::optional< int64_t > size, at::Tensor &out)
Definition: Functions.h:21271
inline ::std::vector< at::Tensor > _histogramdd_bin_edges(const at::Tensor &self, at::IntArrayRef bins, c10::optional< at::ArrayRef< double > > range=c10::nullopt, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:10870
at::Tensor & log_normal_outf(const at::Tensor &self, double mean, double std, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:24212
inline ::std::tuple< at::Tensor, at::Tensor > _weight_norm_interface_backward(const at::Tensor &grad_w, const at::Tensor &saved_v, const at::Tensor &saved_g, const at::Tensor &saved_norms, int64_t dim)
Definition: Functions.h:8126
at::Tensor & upsample_nearest1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15687
inline ::std::tuple< at::Tensor &, at::Tensor & > _fused_moving_avg_obs_fq_helper_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, const at::Tensor &observer_on, const at::Tensor &fake_quant_on, at::Tensor &running_min, at::Tensor &running_max, at::Tensor &scale, at::Tensor &zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false)
Definition: Functions.h:23773
at::Tensor & _grid_sampler_2d_cpu_fallback_outf(const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor &out)
Definition: Functions.h:21298
at::Tensor & _fake_quantize_learnable_per_tensor_affine_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0)
Definition: Functions.h:23746
at::Tensor diag(const at::Tensor &self, int64_t diagonal=0)
Definition: Functions.h:9843
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _convolution_double_backward_symint(const c10::optional< at::Tensor > &ggI, const c10::optional< at::Tensor > &ggW, const c10::optional< at::Tensor > &ggb, const at::Tensor &gO, const at::Tensor &weight, const at::Tensor &self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:1806
at::Tensor special_bessel_y1(const at::Tensor &self)
Definition: Functions.h:19258
at::Tensor & maximum_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11060
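The index pairs many operators with both an *_out and an *_outf entry; as this hedged sketch illustrates (values illustrative), they invoke the same op and differ only in where the out argument sits in the parameter list:

at::Tensor a = at::tensor({1.0, 4.0});
at::Tensor b = at::tensor({3.0, 2.0});
at::Tensor out = at::empty_like(a);
at::maximum_out(out, a, b);  // out-first variant
at::maximum_outf(a, b, out); // "outf" variant: out last, matching the aten schema order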
at::Tensor slow_conv3d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0)
Definition: Functions.h:16695
at::Tensor & add_outf(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:545
at::Tensor bilinear(const at::Tensor &input1, const at::Tensor &input2, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={})
Definition: Functions.h:1116
at::Tensor & remainder_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10988
at::Tensor & unfold_backward_symint_outf(const at::Tensor &grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor &out)
Definition: Functions.h:24373
at::Tensor & randint_symint_outf(int64_t high, c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:5920
at::Tensor & sparse_mask_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mask)
Definition: Functions.h:23484
void unsafe_split_with_sizes_out(at::TensorList out, const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:22814
at::Tensor & reciprocal_(at::Tensor &self)
Definition: Functions.h:6436
at::Tensor & vdot_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:2396
at::Tensor & _adaptive_avg_pool3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:25100
void _foreach_round_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24853
at::Tensor & mkldnn_convolution_symint_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor &out)
Definition: Functions.h:21814
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_group_norm_backward_symint(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:3492
bool can_cast(at::ScalarType from, at::ScalarType to)
Definition: Functions.h:9194
at::Tensor nll_loss_nd(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12401
at::Tensor elu(const at::Tensor &self, const at::Scalar &alpha=1, const at::Scalar &scale=1, const at::Scalar &input_scale=1)
Definition: Functions.h:12868
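An illustrative sketch of at::elu with its default parameters:

// With the default alpha = scale = input_scale = 1: x for x > 0, exp(x) - 1 otherwise.
at::Tensor x = at::tensor({-1.0, 0.0, 1.0});
at::Tensor y = at::elu(x); // approx {-0.632, 0.0, 1.0}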
at::Tensor & _unsafe_view_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:23060
at::Tensor & cumsum_out(at::Tensor &out, const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:2119
at::Tensor prelu(const at::Tensor &self, const at::Tensor &weight)
Definition: Functions.h:6673
at::Tensor & arcsinh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:852
at::Tensor hardtanh_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &min_val, const at::Scalar &max_val)
Definition: Functions.h:12986
inline ::std::vector< at::Tensor > split_with_sizes_copy(const at::Tensor &self, at::IntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:18857
at::Tensor isclose(const at::Tensor &self, const at::Tensor &other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false)
Definition: Functions.h:3675
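A minimal sketch of at::isclose and its tolerance rule (values illustrative):

// Elementwise test |self - other| <= atol + rtol * |other|, with rtol = 1e-05 and atol = 1e-08 by default.
at::Tensor x = at::tensor({1.0, 2.0});
at::Tensor y = at::tensor({1.000001, 2.0});
at::Tensor mask = at::isclose(x, y); // bool tensor, {true, true}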
at::Tensor & erf_(at::Tensor &self)
Definition: Functions.h:2933
at::Tensor & sspaddmm_outf(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:7372
at::Tensor & _reshape_alias_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:25562
at::Tensor & linalg_tensorinv_outf(const at::Tensor &self, int64_t ind, at::Tensor &out)
Definition: Functions.h:18463
at::Tensor & grid_sampler_3d_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:21303
at::Tensor & replication_pad1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14103
at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor &crow_indices, const at::Tensor &col_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8754
at::Tensor & bernoulli_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:1102
at::Tensor special_logsumexp(const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false)
Definition: Functions.h:17361
at::Tensor view_as_real(const at::Tensor &self)
Definition: Functions.h:400
int64_t _cufft_get_plan_cache_size(int64_t device_index)
Definition: Functions.h:3602
at::Tensor _cast_Long(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:108
at::Tensor & diag_out(at::Tensor &out, const at::Tensor &self, int64_t diagonal=0)
Definition: Functions.h:9834
at::Tensor mkldnn_reorder_conv3d_weight(const at::Tensor &self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1)
Definition: Functions.h:8975
at::Tensor & ones_symint_out(at::Tensor &out, c10::SymIntArrayRef size)
Definition: Functions.h:5250
at::Tensor & slow_conv_transpose3d_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:16488
at::Tensor & _cudnn_rnn_flatten_weight_out(at::Tensor &out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional)
Definition: Functions.h:19907
at::Tensor & row_indices_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25824
at::Tensor & block_diag_outf(at::TensorList tensors, at::Tensor &out)
Definition: Functions.h:20210
at::Tensor & leaky_relu_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &negative_slope, bool self_is_result, at::Tensor &grad_input)
Definition: Functions.h:13038
at::Tensor & replication_pad1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14037
at::Tensor & bitwise_left_shift_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:9749
const at::Tensor & _resize_output_outf(const at::Tensor &self, at::IntArrayRef size, at::Device device, const at::Tensor &out)
Definition: Functions.h:21159
at::Tensor concat(at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:1349
at::Tensor & log2_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:4101
at::Tensor & linalg_householder_product_outf(const at::Tensor &input, const at::Tensor &tau, at::Tensor &out)
Definition: Functions.h:18127
void _foreach_ceil_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24709
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_lu_out(at::Tensor &P, at::Tensor &L, at::Tensor &U, const at::Tensor &A, bool pivot=true)
Definition: Functions.h:17856
at::Tensor _fft_c2c(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool forward)
Definition: Functions.h:3531
at::Tensor & sparse_sampled_addmm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:8585
at::Tensor & diag_outf(const at::Tensor &self, int64_t diagonal, at::Tensor &out)
Definition: Functions.h:9838
at::Tensor & special_erfc_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17104
at::Tensor _pad_circular(const at::Tensor &self, at::IntArrayRef pad)
Definition: Functions.h:14433
at::Tensor & special_multigammaln_outf(const at::Tensor &self, int64_t p, at::Tensor &out)
Definition: Functions.h:17473
at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:15632
at::Tensor & sin_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6973
at::Tensor _upsample_bilinear2d_aa(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14543
at::Tensor resolve_conj(const at::Tensor &self)
Definition: Functions.h:468
at::Tensor _fft_c2r(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size)
Definition: Functions.h:3517
at::Tensor & arctanh_(at::Tensor &self)
Definition: Functions.h:881
at::Tensor & copysign_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1173
at::Tensor affine_grid_generator_backward(const at::Tensor &grad, at::IntArrayRef size, bool align_corners)
Definition: Functions.h:622
at::Tensor unflatten(const at::Tensor &self, int64_t dim, at::IntArrayRef sizes)
Definition: Functions.h:3093
void _foreach_tan_(at::TensorList self)
Definition: Functions.h:12008
at::Tensor & special_erf_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17090
at::Tensor & squeeze_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25707
at::Tensor & arccosh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:810
at::Tensor & block_diag_out(at::Tensor &out, at::TensorList tensors)
Definition: Functions.h:20206
at::Tensor & expand_copy_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size, bool implicit=false)
Definition: Functions.h:25531
inline ::std::vector< at::Tensor > _foreach_norm(at::TensorList self, const at::Scalar &ord=2)
Definition: Functions.h:12163
inline ::std::tuple< at::Tensor, at::Tensor > max_pool1d_with_indices(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4471
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_eig_outf(const at::Tensor &self, at::Tensor &eigenvalues, at::Tensor &eigenvectors)
Definition: Functions.h:18057
void _foreach_tanh_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24822
int64_t size(const at::Tensor &self, at::Dimname dim)
Definition: Functions.h:7031
at::Tensor & deg2rad_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:5420
at::Tensor & log10_(at::Tensor &self)
Definition: Functions.h:4054
at::Tensor & upsample_bilinear2d_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:14906
at::Tensor adjoint(const at::Tensor &self)
Definition: Functions.h:5351
at::Tensor arctan2(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10786
at::Tensor & reciprocal_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6441
inline ::std::tuple< at::Tensor &, at::Tensor & > _aminmax_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self)
Definition: Functions.h:21664
at::Tensor & batch_norm_backward_elemt_out(at::Tensor &out, const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &weight, const at::Tensor &mean_dy, const at::Tensor &mean_dy_xmu, const at::Tensor &count)
Definition: Functions.h:22079
at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19692
at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:6290
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_inv_ex_outf(const at::Tensor &A, bool check_errors, at::Tensor &inverse, at::Tensor &info)
Definition: Functions.h:18141
at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor &input)
Definition: Functions.h:3983
at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:6081
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_batch_norm_backward_outf(const at::Tensor &grad_out, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, const c10::optional< at::Tensor > &save_mean, const c10::optional< at::Tensor > &save_invstd, bool train, double eps, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:22065
at::Tensor special_entr(const at::Tensor &self)
Definition: Functions.h:16969
inline ::std::tuple< at::Tensor, at::Tensor > linalg_qr(const at::Tensor &A, c10::string_view mode="reduced")
Definition: Functions.h:18482
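A hedged sketch of at::linalg_qr in its default "reduced" mode (shapes illustrative):

// Reduced QR of a tall matrix: Q is 4x3 with orthonormal columns, R is 3x3 upper triangular.
at::Tensor A = at::randn({4, 3});
auto [Q, R] = at::linalg_qr(A, "reduced");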
at::Tensor & mkldnn_reorder_conv2d_weight_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=c10::nullopt)
Definition: Functions.h:23602
at::Tensor & cholesky_out(at::Tensor &out, const at::Tensor &self, bool upper=false)
Definition: Functions.h:10514
at::Tensor & reflection_pad1d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13718
at::Tensor & _to_copy_outf(const at::Tensor &self, bool non_blocking, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:23791
void _foreach_log_(at::TensorList self)
Definition: Functions.h:11958
at::Tensor & upsample_bicubic2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15159
at::Tensor lerp(const at::Tensor &self, const at::Tensor &end, const at::Scalar &weight)
Definition: Functions.h:10818
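A short sketch of at::lerp with a scalar weight (values illustrative):

// start + weight * (end - start), elementwise.
at::Tensor lo = at::zeros({3});
at::Tensor hi = at::ones({3});
at::Tensor mid = at::lerp(lo, hi, 0.5); // {0.5, 0.5, 0.5}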
at::Tensor to_dense_backward(const at::Tensor &grad, const at::Tensor &input)
Definition: Functions.h:8931
inline ::std::tuple< at::Tensor &, at::Tensor & > batch_norm_update_stats_outf(const at::Tensor &input, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:22092
at::Tensor & _stack_outf(at::TensorList tensors, int64_t dim, at::Tensor &out)
Definition: Functions.h:7400
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_batch_norm_backward(const at::Tensor &grad_out, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, const c10::optional< at::Tensor > &save_mean, const c10::optional< at::Tensor > &save_invstd, bool train, double eps, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:5123
at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &indices)
Definition: Functions.h:13431
const at::Tensor & resize_outf(const at::Tensor &self, at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format, const at::Tensor &out)
Definition: Functions.h:21100
at::Tensor & clone_outf(const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:23311
at::Tensor fill(const at::Tensor &self, const at::Scalar &value)
Definition: Functions.h:3103
at::Tensor & replication_pad3d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14312
at::Tensor & lu_solve_outf(const at::Tensor &self, const at::Tensor &LU_data, const at::Tensor &LU_pivots, at::Tensor &out)
Definition: Functions.h:10626
inline ::std::vector< at::Tensor > tensor_split_symint(const at::Tensor &self, c10::SymInt sections, int64_t dim=0)
Definition: Functions.h:1464
at::Tensor & new_empty_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:20862
inline ::std::vector< at::Tensor > _foreach_sqrt(at::TensorList self)
Definition: Functions.h:11833
at::Tensor slow_conv_dilated3d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16805
void _amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor &found_inf, const at::Tensor &inv_scale)
Definition: Functions.h:11568
void _foreach_sin_(at::TensorList self)
Definition: Functions.h:12028
at::Tensor ravel(const at::Tensor &self)
Definition: Functions.h:6426
at::Tensor & linalg_eigvals_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:18067
at::Tensor _adaptive_avg_pool2d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13260
at::Tensor & detach_(at::Tensor &self)
Definition: Functions.h:7021
at::Tensor & _sparse_sum_backward_outf(const at::Tensor &grad, const at::Tensor &self, at::IntArrayRef dim, at::Tensor &out)
Definition: Functions.h:23221
void _foreach_abs_(at::TensorList self)
Definition: Functions.h:11848
at::Tensor _softmax(const at::Tensor &self, int64_t dim, bool half_to_float)
Definition: Functions.h:7175
at::Tensor & special_erfinv_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17128
at::Tensor & upsample_bilinear2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:14961
inline ::std::tuple< at::Tensor, at::Tensor > linalg_ldl_factor(const at::Tensor &self, bool hermitian=false)
Definition: Functions.h:17926
at::Tensor glu(const at::Tensor &self, int64_t dim=-1)
Definition: Functions.h:12901
at::Tensor __xor__(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9724
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > unique_consecutive_outf(const at::Tensor &self, bool return_inverse, bool return_counts, c10::optional< int64_t > dim, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:23015
at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19795
at::Tensor linalg_matmul(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17968
at::Tensor & special_erfcx_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17118
at::Tensor & bitwise_and_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:9609
at::Tensor & index_add_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:9457
at::Tensor linalg_ldl_solve(const at::Tensor &LD, const at::Tensor &pivots, const at::Tensor &B, bool hermitian=false)
Definition: Functions.h:17940
at::Tensor & arctan_(at::Tensor &self)
Definition: Functions.h:1001
at::Tensor subtract(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:8546
at::Tensor _embedding_bag_sparse_backward_symint(const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offsets, const at::Tensor &offset2bag, const at::Tensor &bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:2554
at::Tensor & _nested_from_padded_and_nested_example_outf(const at::Tensor &padded, const at::Tensor &nt_example, at::Tensor &out)
Definition: Functions.h:22970
at::Tensor & conv_depthwise3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:25162
at::Tensor ger(const at::Tensor &self, const at::Tensor &vec2)
Definition: Functions.h:18202
void lstm_mps_backward_out(at::Tensor &out0, at::TensorList out1, at::TensorList out2, const at::Tensor &grad_y, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &z_state, const at::Tensor &cell_state_fwd, const at::Tensor &input, const at::Tensor &layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:23805
inline ::std::tuple< at::Tensor &, at::Tensor & > _thnn_fused_gru_cell_outf(const at::Tensor &input_gates, const at::Tensor &hidden_gates, const at::Tensor &hx, const c10::optional< at::Tensor > &input_bias, const c10::optional< at::Tensor > &hidden_bias, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23836
at::Tensor & upsample_nearest3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16215
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_backward_outf(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21457
at::Tensor select(const at::Tensor &self, at::Dimname dim, int64_t index)
Definition: Functions.h:6773
inline ::std::tuple< at::Tensor &, at::Tensor & > var_mean_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< int64_t > correction=c10::nullopt, bool keepdim=false)
Definition: Functions.h:23082
at::Tensor sigmoid_backward(const at::Tensor &grad_output, const at::Tensor &output)
Definition: Functions.h:16356
at::Tensor narrow_symint(const at::Tensor &self, int64_t dim, c10::SymInt start, c10::SymInt length)
Definition: Functions.h:5019
at::Tensor & flip_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dims)
Definition: Functions.h:22894
at::Tensor conv3d(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1)
Definition: Functions.h:1827
inline ::std::vector< at::Tensor > _foreach_lgamma(at::TensorList self)
Definition: Functions.h:12053
at::Tensor & argmin_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false)
Definition: Functions.h:772
at::Tensor & hardsigmoid_(at::Tensor &self)
Definition: Functions.h:12944
at::Tensor mkldnn_convolution_symint(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:4702
at::Tensor & xlogy_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4159
at::Tensor transpose_copy(const at::Tensor &self, int64_t dim0, int64_t dim1)
Definition: Functions.h:18899
at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor &grad, const at::Tensor &mask)
Definition: Functions.h:9105
at::Tensor & replication_pad2d_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14268
at::Tensor & rsub_outf(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:23371
at::Tensor inner(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:18174
at::Tensor frac(const at::Tensor &self)
Definition: Functions.h:3161
at::Tensor & linalg_svdvals_out(at::Tensor &out, const at::Tensor &A, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:18319
at::Tensor & square_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:7553
at::Tensor & _cudnn_init_dropout_state_outf(double dropout, bool train, int64_t dropout_seed, at::Tensor &out)
Definition: Functions.h:20043
inline ::std::vector< at::Tensor > split(const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:7225
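A minimal sketch of at::split (values illustrative):

// Chunks of size split_size along dim; the last chunk may be smaller.
at::Tensor t = at::arange(10);
std::vector<at::Tensor> chunks = at::split(t, 3, 0); // sizes 3, 3, 3, 1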
at::Tensor & _convolution_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor &out)
Definition: Functions.h:20376
at::Tensor & concat_out(at::Tensor &out, at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:1354
at::Tensor as_strided_copy_symint(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset=c10::nullopt)
Definition: Functions.h:18716
at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional< at::Generator > generator=c10::nullopt, at::TensorOptions options={})
Definition: Functions.h:11497
void _foreach_cos_(at::TensorList self)
Definition: Functions.h:11898
at::Tensor & to_sparse_csr_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > dense_dim=c10::nullopt)
Definition: Functions.h:23557
at::Tensor _addmm_activation(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1, bool use_gelu=false)
Definition: Functions.h:8632
at::Tensor & slow_conv_dilated3d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25261
at::Tensor & _conj_physical_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:20084
at::Tensor & indices_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25779
at::Tensor reflection_pad3d_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14026
at::Tensor & hann_window_out(at::Tensor &out, int64_t window_length)
Definition: Functions.h:21321
at::Tensor q_per_channel_zero_points(const at::Tensor &self)
Definition: Functions.h:9035
void _foreach_cosh_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24723
at::Tensor & special_gammaincc_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:17454
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss_forward_out(at::Tensor &output, at::Tensor &total_weight, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index)
Definition: Functions.h:12445
at::Tensor & soft_margin_loss_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, at::Tensor &grad_input)
Definition: Functions.h:12849
at::Tensor & softmax_outf(const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:7165
at::Tensor glu_jvp(const at::Tensor &glu, const at::Tensor &x, const at::Tensor &dx, int64_t dim)
Definition: Functions.h:12920
at::Tensor & hardshrink_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &lambd=0.5)
Definition: Functions.h:6726
at::Tensor sigmoid(const at::Tensor &self)
Definition: Functions.h:6921
at::Tensor upsample_nearest1d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15775
at::Tensor & bartlett_window_outf(int64_t window_length, at::Tensor &out)
Definition: Functions.h:20124
at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_symint_outf(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor &indices, const at::Tensor &values, at::Tensor &out)
Definition: Functions.h:23445
at::Tensor hspmm(const at::Tensor &mat1, const at::Tensor &mat2)
Definition: Functions.h:8950
at::Tensor & select_copy_symint_out(at::Tensor &out, const at::Tensor &self, int64_t dim, c10::SymInt index)
Definition: Functions.h:25628
at::Tensor embedding_dense_backward(const at::Tensor &grad_output, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq)
Definition: Functions.h:2450
at::Tensor & thnn_conv2d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0)
Definition: Functions.h:16521
at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor &scales, const at::Tensor &zero_points, int64_t axis, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:21084
at::Tensor polar(const at::Tensor &abs, const at::Tensor &angle)
Definition: Functions.h:1673
void split_copy_symint_out(at::TensorList out, const at::Tensor &self, c10::SymInt split_size, int64_t dim=0)
Definition: Functions.h:18985
at::Tensor & _embedding_bag_dense_backward_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &indices, const at::Tensor &offset2bag, const at::Tensor &bag_size, const at::Tensor &maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional< at::Tensor > &per_sample_weights, int64_t padding_idx=-1)
Definition: Functions.h:20789
at::Tensor & empty_strided_symint_out(at::Tensor &out, c10::SymIntArrayRef size, c10::SymIntArrayRef stride)
Definition: Functions.h:21209
at::Tensor _add_relu(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:550
at::Tensor & floor_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:3137
at::Tensor & special_bessel_j1_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19235
at::Tensor & mkldnn_reorder_conv3d_weight_outf(const at::Tensor &self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor &out)
Definition: Functions.h:23615
at::Tensor & fft_hfft_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17544
at::Tensor & replication_pad1d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14070
at::Tensor fractional_max_pool3d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &indices)
Definition: Functions.h:13552
at::Tensor & amin_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim={}, bool keepdim=false)
Definition: Functions.h:4672
at::Tensor & special_log1p_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17422
at::Tensor & _cudnn_init_dropout_state_out(at::Tensor &out, double dropout, bool train, int64_t dropout_seed)
Definition: Functions.h:20039
at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16259
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_batch_norm(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double momentum, double eps)
Definition: Functions.h:5052
at::Tensor greater_equal(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10056
at::Tensor multilabel_margin_loss(const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12324
at::Tensor miopen_convolution_symint(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4744
at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19413
inline ::std::tuple< at::Tensor, at::Tensor > var_mean(const at::Tensor &self, bool unbiased)
Definition: Functions.h:8052
at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options)
Definition: Functions.h:249
at::Tensor & lerp_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &end, const at::Scalar &weight)
Definition: Functions.h:10800
at::Tensor _masked_scale(const at::Tensor &self, const at::Tensor &mask, double scale)
Definition: Functions.h:268
at::Tensor & special_sinc_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17398
at::Tensor & new_ones_outf(const at::Tensor &self, at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:21038
inline ::std::tuple< at::Tensor, at::Tensor > _grid_sampler_2d_cpu_fallback_backward(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:3353
at::Tensor & round_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6619
at::Tensor & miopen_depthwise_convolution_symint_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21982
at::Tensor relu6(const at::Tensor &self)
Definition: Functions.h:6663
at::Tensor & _sparse_csr_sum_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:23226
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _embedding_bag_forward_only_outf(const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional< at::Tensor > &per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3)
Definition: Functions.h:20775
at::Tensor group_norm(const at::Tensor &input, int64_t num_groups, const c10::optional< at::Tensor > &weight={}, const c10::optional< at::Tensor > &bias={}, double eps=1e-05, bool cudnn_enabled=true)
Definition: Functions.h:3454
at::Tensor & arccosh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:814
at::Tensor & less_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10215
at::Tensor & _sparse_csr_prod_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:23235
inline ::std::tuple< at::Tensor &, at::Tensor & > mkldnn_linear_backward_weights_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, bool bias_defined)
Definition: Functions.h:21637
at::Tensor threshold_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &threshold)
Definition: Functions.h:7766
inline ::std::tuple< at::Tensor, at::Tensor > linalg_inv_ex(const at::Tensor &A, bool check_errors=false)
Definition: Functions.h:18132
void _validate_sparse_compressed_tensor_args(const at::Tensor &compressed_indices, const at::Tensor &plain_indices, const at::Tensor &values, at::IntArrayRef size, at::Layout layout)
Definition: Functions.h:8848
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _slow_conv2d_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:16558
at::Tensor & to_padded_tensor_outf(const at::Tensor &self, double padding, at::OptionalIntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25911
at::Tensor & special_round_outf(const at::Tensor &self, int64_t decimals, at::Tensor &out)
Definition: Functions.h:17412
void _foreach_tan_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24813
at::Tensor & linalg_det_outf(const at::Tensor &A, at::Tensor &out)
Definition: Functions.h:17902
at::Tensor & special_gammaln_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17076
void _foreach_maximum_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24479
at::Tensor & mkldnn_adaptive_avg_pool2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13246
at::Tensor & _convolution_symint_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor &out)
Definition: Functions.h:20398
at::Tensor & _sparse_addmm_outf(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:23389
at::Tensor & softplus_outf(const at::Tensor &self, const at::Scalar &beta, const at::Scalar &threshold, at::Tensor &out)
Definition: Functions.h:13123
at::Tensor & _convolution_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32)
Definition: Functions.h:20365
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > linalg_lu_factor_ex(const at::Tensor &A, bool pivot=true, bool check_errors=false)
Definition: Functions.h:17837
int64_t _cufft_get_plan_cache_max_size(int64_t device_index)
Definition: Functions.h:3607
at::Tensor & multinomial_outf(const at::Tensor &self, int64_t num_samples, bool replacement, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:10654
void _cummax_helper(const at::Tensor &self, at::Tensor &values, at::Tensor &indices, int64_t dim)
Definition: Functions.h:2038
inline ::std::tuple< at::Tensor &, at::Tensor & > _aminmax_outf(const at::Tensor &self, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:21668
at::Tensor & acosh_(at::Tensor &self)
Definition: Functions.h:786
at::Tensor & exponential_outf(const at::Tensor &self, double lambd, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:24226
void _foreach_erf_(at::TensorList self)
Definition: Functions.h:11918
at::Tensor & detach_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25650
at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional< at::MemoryFormat > memory_format=MemoryFormat::Contiguous)
Definition: Functions.h:2785
at::Tensor cosh(const at::Tensor &self)
Definition: Functions.h:1906
at::Tensor & _test_autograd_multiple_dispatch_view_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25357
at::Tensor & inverse_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:18165
at::Tensor & logsumexp_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false)
Definition: Functions.h:4298
const at::Tensor & _resize_output_out(const at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, at::Device device)
Definition: Functions.h:21155
at::Tensor smm(const at::Tensor &self, const at::Tensor &mat2)
Definition: Functions.h:7151
at::Tensor _cast_Double(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:93
at::Tensor replication_pad2d_symint(const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14224
at::Tensor fmax(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11041
inline ::std::tuple< at::Tensor &, at::Tensor & > fake_quantize_per_channel_affine_cachemask_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:23755
at::Tensor & logical_not_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:1210
at::Tensor & _sparse_softmax_outf(const at::Tensor &self, int64_t dim, bool half_to_float, at::Tensor &out)
Definition: Functions.h:23248
at::Tensor & nansum_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7515
at::Tensor & minimum_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11097
at::Tensor bitwise_not(const at::Tensor &self)
Definition: Functions.h:1159
at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15038
at::Tensor & _adaptive_avg_pool3d_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:25144
at::Tensor conv_depthwise3d(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16629
at::Tensor mkldnn_convolution(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:4691
at::Tensor & isneginf_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:16950
inline ::std::vector< at::Tensor > _foreach_minimum(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11648
at::Tensor & fft_irfft_outf(const at::Tensor &self, c10::optional< int64_t > n, int64_t dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17534
at::Tensor & dequantize_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23665
at::Tensor upsample_nearest1d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14631
at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15401
inline ::std::tuple< at::Tensor &, at::Tensor & > kthvalue_outf(const at::Tensor &self, int64_t k, int64_t dim, bool keepdim, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:3810
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_layer_norm_symint(const at::Tensor &input, c10::SymIntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, double eps)
Definition: Functions.h:3862
inline ::std::vector< at::Tensor > _foreach_sin(at::TensorList self)
Definition: Functions.h:12023
inline ::std::tuple< at::Tensor &, at::Tensor & > native_dropout_outf(const at::Tensor &input, double p, c10::optional< bool > train, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:20070
at::Tensor & square_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:7557
at::Tensor _fft_r2c(const at::Tensor &self, at::IntArrayRef dim, int64_t normalization, bool onesided)
Definition: Functions.h:3503
at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:13197
at::Tensor & conv_tbc_outf(const at::Tensor &self, const at::Tensor &weight, const at::Tensor &bias, int64_t pad, at::Tensor &out)
Definition: Functions.h:20413
at::Tensor & addr_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &vec1, const at::Tensor &vec2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:608
at::Tensor & max_unpool3d_outf(const at::Tensor &self, const at::Tensor &indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13631
at::Tensor & copy_sparse_to_sparse_(at::Tensor &self, const at::Tensor &src, bool non_blocking=false)
Definition: Functions.h:8955
at::Tensor adaptive_avg_pool3d_symint(const at::Tensor &self, c10::SymIntArrayRef output_size)
Definition: Functions.h:13342
inline ::std::tuple< at::Tensor &, at::Tensor & > _cudnn_ctc_loss_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity)
Definition: Functions.h:19898
at::Tensor & as_strided_scatter_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset=c10::nullopt)
Definition: Functions.h:22726
at::Tensor & linalg_matrix_power_outf(const at::Tensor &self, int64_t n, at::Tensor &out)
Definition: Functions.h:18505
at::Tensor & exp2_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:2995
at::Tensor & special_modified_bessel_k0_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19641
at::Tensor & polygamma_outf(int64_t n, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10696
at::Tensor & embedding_out(at::Tensor &out, const at::Tensor &weight, const at::Tensor &indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false)
Definition: Functions.h:20669
inline ::std::tuple< at::Tensor &, at::Tensor & > nanmedian_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:4616
at::Tensor & pixel_shuffle_out(at::Tensor &out, const at::Tensor &self, int64_t upscale_factor)
Definition: Functions.h:22204
at::Tensor & log2_(at::Tensor &self)
Definition: Functions.h:4092
at::Tensor & addmm_outf(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:8613
at::Tensor linalg_cholesky(const at::Tensor &self, bool upper=false)
Definition: Functions.h:17795
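A small sketch of `at::linalg_cholesky`, building a symmetric positive-definite input by construction (illustrative values, not from this header):

at::Tensor A   = at::randn({3, 3});
at::Tensor spd = at::matmul(A, A.transpose(0, 1)) + 3 * at::eye(3);  // SPD
at::Tensor L   = at::linalg_cholesky(spd);  // lower triangular (upper=false)
// Reconstruction check: L @ L^T should recover spd up to rounding.
at::Tensor err = (at::matmul(L, L.transpose(0, 1)) - spd).abs().max();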
inline ::std::vector< at::Tensor > _foreach_clamp_min(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11618
at::Tensor special_chebyshev_polynomial_u(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19314
at::Tensor & index_fill_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Scalar &value)
Definition: Functions.h:24039
at::Tensor & gelu_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::string_view approximate="none")
Definition: Functions.h:6707
at::Tensor & scatter_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src, at::Tensor &out)
Definition: Functions.h:9515
at::Tensor vstack(at::TensorList tensors)
Definition: Functions.h:7419
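`vstack` concatenates along dim 0 and `hstack` (indexed further below) along dim 1 for 2-D inputs; a quick illustrative sketch:

at::Tensor a = at::ones({2, 3});
at::Tensor b = at::zeros({2, 3});
at::Tensor v = at::vstack({a, b});  // shape {4, 3}
at::Tensor h = at::hstack({a, b});  // shape {2, 6}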
at::Tensor fix(const at::Tensor &self)
Definition: Functions.h:7910
at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &indices, at::Tensor &grad_input)
Definition: Functions.h:13407
at::Tensor linalg_eigvals(const at::Tensor &self)
Definition: Functions.h:18062
at::Tensor & tan_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:7696
at::Tensor & _compute_linear_combination_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &coefficients)
Definition: Functions.h:4398
inline ::std::vector< at::Tensor > _foreach_round(at::TensorList self)
Definition: Functions.h:12043
at::Tensor & _new_zeros_with_same_feature_meta_outf(const at::Tensor &self, const at::Tensor &other, int64_t self_num_batch_dims, at::Tensor &out)
Definition: Functions.h:19893
at::Tensor median(const at::Tensor &self)
Definition: Functions.h:4573
at::Tensor & pow_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &exponent)
Definition: Functions.h:11344
inline ::std::vector< at::Tensor > _foreach_addcdiv(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value=1)
Definition: Functions.h:12133
at::Tensor diagonal(const at::Tensor &self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1)
Definition: Functions.h:2192
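A sketch of `diagonal` with the default and an offset (values illustrative):

at::Tensor m  = at::arange(9, at::kFloat).reshape({3, 3});
at::Tensor d  = at::diagonal(m);                // main diagonal: 0, 4, 8
at::Tensor d1 = at::diagonal(m, /*offset=*/1);  // above the main: 1, 5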
inline ::std::vector< at::Tensor > _foreach_expm1(at::TensorList self)
Definition: Functions.h:11933
const at::Tensor & fft_ihfftn_out(const at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17726
inline ::std::tuple< at::Tensor &, at::Tensor & > nanmedian_outf(const at::Tensor &self, int64_t dim, bool keepdim, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:4620
inline ::std::tuple< double, int64_t > _choose_qparams_per_tensor(const at::Tensor &self, bool reduce_range=false)
Definition: Functions.h:9130
void _foreach_norm_outf(at::TensorList self, const at::Scalar &ord, at::TensorList out)
Definition: Functions.h:24961
void _foreach_asin_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24691
inline ::std::tuple< at::Tensor, at::Tensor > mps_convolution_transpose_backward(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array< bool, 2 > output_mask)
Definition: Functions.h:1985
at::Tensor fbgemm_linear_fp16_weight(const at::Tensor &input, const at::Tensor &packed_weight, const at::Tensor &bias)
Definition: Functions.h:3978
at::Tensor & special_bessel_j1_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19239
at::Tensor & inverse_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:18169
at::Tensor quantized_gru_cell(const at::Tensor &input, const at::Tensor &hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const at::Tensor &b_ih, const at::Tensor &b_hh, const at::Tensor &packed_ih, const at::Tensor &packed_hh, const at::Tensor &col_offsets_ih, const at::Tensor &col_offsets_hh, const at::Scalar &scale_ih, const at::Scalar &scale_hh, const at::Scalar &zero_point_ih, const at::Scalar &zero_point_hh)
Definition: Functions.h:9319
at::Tensor sparse_sampled_addmm(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:8594
at::Tensor hardtanh(const at::Tensor &self, const at::Scalar &min_val=-1, const at::Scalar &max_val=1)
Definition: Functions.h:12972
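`hardtanh` clamps to [min_val, max_val], defaulting to [-1, 1]; for instance:

at::Tensor x = at::tensor({-3.0, -0.5, 0.5, 3.0});
at::Tensor y = at::hardtanh(x);             // {-1.0, -0.5, 0.5, 1.0}
at::Tensor z = at::hardtanh(x, -2.0, 2.0);  // custom clamp range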
at::Tensor true_divide(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2354
at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar &fill_value, at::Tensor &out)
Definition: Functions.h:3266
at::Tensor & logical_xor_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1220
at::Tensor fake_quantize_per_channel_affine(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:9095
at::Tensor squeeze(const at::Tensor &self)
Definition: Functions.h:7343
at::Tensor & _sobol_engine_initialize_state_(at::Tensor &self, int64_t dimension)
Definition: Functions.h:298
at::Tensor & histc_outf(const at::Tensor &self, int64_t bins, const at::Scalar &min, const at::Scalar &max, at::Tensor &out)
Definition: Functions.h:10832
at::Scalar _local_scalar_dense(const at::Tensor &self)
Definition: Functions.h:9204
at::Tensor & convolution_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor &out)
Definition: Functions.h:20270
at::Tensor special_zeta(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17235
at::Tensor & diagonal_copy_outf(const at::Tensor &self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor &out)
Definition: Functions.h:25504
at::Tensor & true_divide_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:2363
void _foreach_cos_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24714
at::Tensor & sin_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6969
at::Tensor & digamma_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10682
at::Tensor & slow_conv_dilated2d_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25239
at::Tensor _pin_memory(const at::Tensor &self, c10::optional< at::Device > device=c10::nullopt)
Definition: Functions.h:5376
at::Tensor isfinite(const at::Tensor &self)
Definition: Functions.h:16921
at::Tensor & set_out(at::Tensor &out, const at::Tensor &self, at::Storage source)
Definition: Functions.h:23859
at::Tensor cross_entropy_loss_symint(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100, double label_smoothing=0.0)
Definition: Functions.h:10441
at::Tensor hann_window(int64_t window_length, at::TensorOptions options={})
Definition: Functions.h:3368
at::Tensor & _test_optional_intlist_out(at::Tensor &out, const at::Tensor &values, at::OptionalIntArrayRef addends)
Definition: Functions.h:25312
at::Tensor & _nested_tensor_from_mask_outf(const at::Tensor &t, const at::Tensor &mask, bool mask_check, at::Tensor &out)
Definition: Functions.h:22934
at::Tensor & crow_indices_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25797
at::Tensor & nll_loss_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12533
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_symint_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:20325
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_overrideable_outf(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:20360
at::Tensor slice_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step)
Definition: Functions.h:7069
at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options)
Definition: Functions.h:8873
at::Tensor & elu_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &alpha=1, const at::Scalar &scale=1, const at::Scalar &input_scale=1)
Definition: Functions.h:12859
at::Tensor & arcsin_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:972
at::Tensor as_strided_symint(const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset=c10::nullopt)
Definition: Functions.h:906
at::Tensor & __rshift___out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:24111
at::Tensor & scalar_tensor_out(at::Tensor &out, const at::Scalar &s)
Definition: Functions.h:22240
at::Tensor linalg_inv(const at::Tensor &A)
Definition: Functions.h:18146
at::Tensor upsample_bicubic2d(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14587
inline ::std::tuple< at::Tensor &, at::Tensor & > std_mean_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, c10::optional< int64_t > correction, bool keepdim, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:22871
at::Tensor & empty_outf(at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:2842
at::Tensor & ldexp_(at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3998
at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16270
at::Tensor hstack(at::TensorList tensors)
Definition: Functions.h:7405
at::Tensor _pad_enum(const at::Tensor &self, at::IntArrayRef pad, int64_t mode, c10::optional< double > value=c10::nullopt)
Definition: Functions.h:14455
at::Tensor replication_pad2d_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14290
at::Tensor & linalg_matrix_norm_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:18263
at::Tensor & bincount_out(at::Tensor &out, const at::Tensor &self, const c10::optional< at::Tensor > &weights={}, int64_t minlength=0)
Definition: Functions.h:20179
at::Tensor addr(const at::Tensor &self, const at::Tensor &vec1, const at::Tensor &vec2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:603
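`addr` computes beta * self + alpha * outer(vec1, vec2); a sketch with illustrative shapes:

at::Tensor m  = at::zeros({3, 4});
at::Tensor v1 = at::arange(3, at::kFloat);
at::Tensor v2 = at::arange(4, at::kFloat);
at::Tensor r  = at::addr(m, v1, v2);  // defaults beta=1, alpha=1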
at::Tensor poisson(const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:8271
at::Tensor greater(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10168
at::Tensor special_sinc(const at::Tensor &self)
Definition: Functions.h:17389
at::Tensor swapaxes(const at::Tensor &self, int64_t axis0, int64_t axis1)
Definition: Functions.h:10504
at::Tensor miopen_convolution(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4733
at::Tensor nansum(const at::Tensor &self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7510
at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor &out)
Definition: Functions.h:19940
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_group_norm(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps)
Definition: Functions.h:3459
at::Tensor & conv_depthwise3d_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25195
at::Tensor & softplus_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &beta=1, const at::Scalar &threshold=20)
Definition: Functions.h:13119
at::Tensor & atanh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:871
inline ::std::tuple< at::Tensor &, at::Tensor & > adaptive_max_pool2d_out(at::Tensor &out, at::Tensor &indices, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13389
at::Tensor linear(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={})
Definition: Functions.h:3914
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > unique_dim_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false)
Definition: Functions.h:23002
at::Tensor & _sparse_sum_backward_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &self, at::IntArrayRef dim)
Definition: Functions.h:23217
inline ::std::vector< at::Tensor > _foreach_log1p(at::TensorList self)
Definition: Functions.h:11973
void _foreach_sqrt_(at::TensorList self)
Definition: Functions.h:11838
at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16072
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_layer_norm_backward(const at::Tensor &grad_out, const at::Tensor &input, at::IntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:3873
at::Tensor & special_spherical_bessel_j0_outf(const at::Tensor &x, at::Tensor &out)
Definition: Functions.h:19869
at::Tensor & binary_cross_entropy_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, at::Tensor &out)
Definition: Functions.h:1130
at::Tensor & embedding_dense_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor &out)
Definition: Functions.h:20746
at::Tensor & slow_conv_dilated2d_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25217
at::Tensor & _neg_view_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25438
at::Tensor elu_backward(const at::Tensor &grad_output, const at::Scalar &alpha, const at::Scalar &scale, const at::Scalar &input_scale, bool is_result, const at::Tensor &self_or_result)
Definition: Functions.h:12882
inline ::std::tuple< at::Tensor &, at::Tensor & > qr_outf(const at::Tensor &self, bool some, at::Tensor &Q, at::Tensor &R)
Definition: Functions.h:10565
at::Tensor & _stack_out(at::Tensor &out, at::TensorList tensors, int64_t dim=0)
Definition: Functions.h:7396
at::Tensor & log_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:4040
at::Tensor & repeat_outf(const at::Tensor &self, at::IntArrayRef repeats, at::Tensor &out)
Definition: Functions.h:22472
at::Tensor _pad_enum_symint(const at::Tensor &self, c10::SymIntArrayRef pad, int64_t mode, c10::optional< double > value=c10::nullopt)
Definition: Functions.h:14466
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > _linalg_solve_ex(const at::Tensor &A, const at::Tensor &B, bool left=true, bool check_errors=false)
Definition: Functions.h:18412
at::Tensor sign(const at::Tensor &self)
Definition: Functions.h:10739
at::Tensor & upsample_trilinear3d_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15434
at::Tensor & special_exp2_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17034
at::Tensor & _triton_scaled_dot_attention_outf(const at::Tensor &q, const at::Tensor &k, const at::Tensor &v, double dropout_p, at::Tensor &out)
Definition: Functions.h:25966
at::Tensor channel_shuffle(const at::Tensor &self, int64_t groups)
Definition: Functions.h:5366
at::Tensor & randint_symint_out(at::Tensor &out, int64_t high, c10::SymIntArrayRef size)
Definition: Functions.h:5909
at::Tensor __or__(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9681
at::Tensor less_equal(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10112
inline ::std::tuple< at::Tensor, at::Tensor > _linalg_eigh(const at::Tensor &A, c10::string_view UPLO="L", bool compute_v=true)
Definition: Functions.h:18076
at::Tensor & _trilinear_out(at::Tensor &out, const at::Tensor &i1, const at::Tensor &i2, const at::Tensor &i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1)
Definition: Functions.h:22984
at::Tensor heaviside(const at::Tensor &self, const at::Tensor &values)
Definition: Functions.h:8570
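`heaviside` returns 0 where self < 0, 1 where self > 0, and the matching entry of `values` where self == 0; e.g.:

at::Tensor x = at::tensor({-1.0, 0.0, 2.0});
at::Tensor v = at::tensor({0.5});    // used only where x == 0
at::Tensor y = at::heaviside(x, v);  // {0.0, 0.5, 1.0}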
at::Tensor & huber_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean, double delta=1.0)
Definition: Functions.h:12803
at::Tensor & reflection_pad1d_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13740
at::Tensor & slow_conv_transpose3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1)
Definition: Functions.h:16455
at::Tensor _fake_quantize_learnable_per_channel_affine(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0)
Definition: Functions.h:9110
inline ::std::tuple< at::Tensor, at::Tensor > linalg_cholesky_ex(const at::Tensor &self, bool upper=false, bool check_errors=false)
Definition: Functions.h:17781
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _lstm_mps_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, at::Tensor &out5, const at::Tensor &input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:23796
at::Tensor from_file(c10::string_view filename, c10::optional< bool > shared=c10::nullopt, c10::optional< int64_t > size=0, at::TensorOptions options={})
Definition: Functions.h:3286
at::Tensor leaky_relu_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &negative_slope, bool self_is_result)
Definition: Functions.h:13043
at::Tensor conv2d(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1)
Definition: Functions.h:1822
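A minimal `conv2d` sketch (shapes illustrative; the weight layout is {out_channels, in_channels/groups, kH, kW}):

at::Tensor input  = at::randn({1, 3, 32, 32});  // NCHW
at::Tensor weight = at::randn({8, 3, 3, 3});
at::Tensor out = at::conv2d(input, weight, /*bias=*/{}, /*stride=*/1,
                            /*padding=*/1);     // -> {1, 8, 32, 32}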
at::Tensor resize_symint(const at::Tensor &self, c10::SymIntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21144
at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor &indices, const at::Tensor &values, at::TensorOptions options)
Definition: Functions.h:8882
at::Tensor & miopen_depthwise_convolution_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21971
inline ::std::tuple< at::Tensor, at::Tensor > linalg_solve_ex(const at::Tensor &A, const at::Tensor &B, bool left=true, bool check_errors=false)
Definition: Functions.h:18426
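`linalg_solve_ex` reports failure through `info` rather than throwing (with the default check_errors=false); a sketch:

at::Tensor A = at::randn({3, 3});
at::Tensor B = at::randn({3, 2});
auto [X, info] = at::linalg_solve_ex(A, B);
// info == 0 on success; a non-zero value flags a singular A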
inline ::std::tuple< at::Tensor, at::Tensor > native_dropout(const at::Tensor &input, double p, c10::optional< bool > train)
Definition: Functions.h:273
at::Tensor linalg_tensorinv(const at::Tensor &self, int64_t ind=2)
Definition: Functions.h:18454
at::Tensor t_copy(const at::Tensor &self)
Definition: Functions.h:18894
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > mkldnn_linear_backward_outf(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21650
at::Tensor & to_sparse_outf(const at::Tensor &self, int64_t sparse_dim, at::Tensor &out)
Definition: Functions.h:23543
at::Tensor minimum(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11092
at::Tensor & dot_outf(const at::Tensor &self, const at::Tensor &tensor, at::Tensor &out)
Definition: Functions.h:2382
at::Tensor & indices_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25775
at::Tensor & cos_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:1901
at::Tensor _upsample_nearest_exact3d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14741
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _embedding_bag_outf(const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional< at::Tensor > &per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3)
Definition: Functions.h:20784
at::Tensor hardswish_backward(const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:13015
inline ::std::vector< at::Tensor > _foreach_ceil(at::TensorList self)
Definition: Functions.h:11883
at::Tensor replication_pad1d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14147
const at::Tensor & resize_as_out(const at::Tensor &out, const at::Tensor &self, const at::Tensor &the_template, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:23316
at::Tensor & index_outf(const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, at::Tensor &out)
Definition: Functions.h:3631
at::Tensor & _grid_sampler_2d_cpu_fallback_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:21294
at::Tensor & _add_relu_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:560
inline ::std::vector< at::Tensor > _to_cpu(at::TensorList tensors)
Definition: Functions.h:8926
double q_scale(const at::Tensor &self)
Definition: Functions.h:9020
at::Tensor layer_norm(const at::Tensor &input, at::IntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight={}, const c10::optional< at::Tensor > &bias={}, double eps=1e-05, bool cudnn_enable=true)
Definition: Functions.h:3829
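A minimal `layer_norm` sketch, normalizing over the trailing dimension with weight and bias left at their empty defaults:

at::Tensor x = at::randn({2, 5, 16});
at::Tensor y = at::layer_norm(x, /*normalized_shape=*/{16});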
at::Tensor _nested_from_padded(const at::Tensor &padded, const at::Tensor &cpu_nested_shape_example, bool fuse_transform_0213=false)
Definition: Functions.h:7861
at::Tensor linalg_cond(const at::Tensor &self, const c10::optional< at::Scalar > &p=c10::nullopt)
Definition: Functions.h:18328
at::Tensor & sigmoid_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &output)
Definition: Functions.h:16347
inline ::std::vector< at::Tensor > unsafe_split_symint(const at::Tensor &self, c10::SymInt split_size, int64_t dim=0)
Definition: Functions.h:7214
at::Tensor digamma(const at::Tensor &self)
Definition: Functions.h:10687
at::Tensor upsample_nearest2d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14686
void _foreach_tanh_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24826
at::Tensor native_norm(const at::Tensor &self, const at::Scalar &p=2)
Definition: Functions.h:8281
inline ::std::tuple< at::Tensor,::std::vector< at::Tensor >,::std::vector< at::Tensor > > lstm_mps_backward(const at::Tensor &grad_y, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &z_state, const at::Tensor &cell_state_fwd, const at::Tensor &input, const at::Tensor &layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:9214
at::Tensor & _new_zeros_with_same_feature_meta_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, int64_t self_num_batch_dims=0)
Definition: Functions.h:19889
void _foreach_add_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24425
at::Tensor & special_polygamma_out(at::Tensor &out, int64_t n, const at::Tensor &self)
Definition: Functions.h:17352
void unsafe_split_out(at::TensorList out, const at::Tensor &self, int64_t split_size, int64_t dim=0)
Definition: Functions.h:22770
at::Tensor & special_i1_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17310
at::Tensor flip(const at::Tensor &self, at::IntArrayRef dims)
Definition: Functions.h:7801
at::Tensor & zero_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23344
at::Tensor & orgqr_outf(const at::Tensor &self, const at::Tensor &input2, at::Tensor &out)
Definition: Functions.h:10598
at::Tensor fbgemm_linear_int8_weight(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &packed, const at::Tensor &col_offsets, const at::Scalar &weight_scale, const at::Scalar &weight_zero_point, const at::Tensor &bias)
Definition: Functions.h:3958
void _foreach_erfc_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24741
at::Tensor & narrow_copy_symint_out(at::Tensor &out, const at::Tensor &self, int64_t dim, c10::SymInt start, c10::SymInt length)
Definition: Functions.h:4986
at::Tensor & log_normal_out(at::Tensor &out, const at::Tensor &self, double mean=1, double std=2, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24208
at::Tensor arccosh(const at::Tensor &self)
Definition: Functions.h:800
at::Tensor & special_expit_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17384
at::Tensor amin(const at::Tensor &self, at::IntArrayRef dim={}, bool keepdim=false)
Definition: Functions.h:4667
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > lstm(const at::Tensor &input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:9254
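A hedged sketch of the functional `lstm` for one unidirectional layer; the flat params order (w_ih, w_hh, b_ih, b_hh) and the factor of 4 for the gates follow the standard LSTM parameterization:

int64_t T = 7, B = 2, I = 4, H = 3;       // seq, batch, input, hidden sizes
at::Tensor input = at::randn({T, B, I});  // batch_first=false layout
at::Tensor h0 = at::zeros({1, B, H}), c0 = at::zeros({1, B, H});
at::Tensor w_ih = at::randn({4 * H, I}), w_hh = at::randn({4 * H, H});
at::Tensor b_ih = at::zeros({4 * H}),   b_hh = at::zeros({4 * H});
auto [output, hn, cn] = at::lstm(input, {h0, c0}, {w_ih, w_hh, b_ih, b_hh},
                                 /*has_biases=*/true, /*num_layers=*/1,
                                 /*dropout=*/0.0, /*train=*/false,
                                 /*bidirectional=*/false, /*batch_first=*/false);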
void _foreach_log1p_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24786
inline ::std::tuple< at::Tensor &, at::Tensor & > batch_norm_update_stats_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &input, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum)
Definition: Functions.h:22088
at::Tensor & hardswish_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:12996
at::Tensor & special_chebyshev_polynomial_v_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19371
at::Tensor index_put(const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate=false)
Definition: Functions.h:3660
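`index_put` takes a `c10::List` of optional index tensors, one slot per indexed dimension, and returns a new tensor (see `index_put_` for the in-place form); an illustrative sketch:

at::Tensor x   = at::zeros({5}, at::kDouble);
at::Tensor idx = at::tensor({0, 2}, at::kLong);
c10::List<c10::optional<at::Tensor>> indices;
indices.push_back(c10::optional<at::Tensor>(idx));
at::Tensor y = at::index_put(x, indices, at::tensor({1.0, 2.0}));
// y: {1.0, 0.0, 2.0, 0.0, 0.0}; x is unchanged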
at::Tensor _trilinear(const at::Tensor &i1, const at::Tensor &i2, const at::Tensor &i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1)
Definition: Functions.h:7881
at::Tensor row_indices_copy(const at::Tensor &self)
Definition: Functions.h:18944
at::Tensor & prod_outf(const at::Tensor &self, int64_t dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:7662
at::Tensor & special_modified_bessel_k1_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19659
at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:6268
at::Tensor max_pool1d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4476
at::Tensor & _addmm_activation_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1, bool use_gelu=false)
Definition: Functions.h:8623
at::Tensor & sgn_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:415
at::Tensor & linalg_multi_dot_out(at::Tensor &out, at::TensorList tensors)
Definition: Functions.h:18571
at::Tensor & kaiser_window_out(at::Tensor &out, int64_t window_length)
Definition: Functions.h:21375
at::Tensor & _mps_convolution_transpose_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:20517
at::Tensor _nested_tensor_softmax_with_shape(const at::Tensor &self, const at::Tensor &query)
Definition: Functions.h:19102
at::Tensor & _cdist_backward_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &x1, const at::Tensor &x2, double p, const at::Tensor &cdist)
Definition: Functions.h:22177
at::Tensor lcm(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3323
at::Tensor batch_norm_elemt(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const at::Tensor &mean, const at::Tensor &invstd, double eps)
Definition: Functions.h:5099
inline ::std::tuple< at::Tensor, at::Tensor > histogram(const at::Tensor &self, const at::Tensor &bins, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:10851
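With a tensor-valued `bins` argument, `histogram` treats it as explicit bin edges and returns both counts and edges; a sketch:

at::Tensor samples = at::randn({1000});
at::Tensor edges   = at::linspace(-3, 3, 7);  // 7 edges -> 6 bins
auto [hist, bin_edges] = at::histogram(samples, edges);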
at::Tensor & mvlgamma_outf(const at::Tensor &self, int64_t p, at::Tensor &out)
Definition: Functions.h:4932
at::Tensor & greater_equal_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10047
at::Tensor isneginf(const at::Tensor &self)
Definition: Functions.h:16945
at::Tensor & conj_physical_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:458
bool _is_zerotensor(const at::Tensor &tensor)
Definition: Functions.h:26078
at::Tensor & triu_out(at::Tensor &out, const at::Tensor &self, int64_t diagonal=0)
Definition: Functions.h:9862
at::Tensor & quantized_max_pool1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:21727
at::Tensor & cross_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:9848
at::Tensor & nll_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, const at::Tensor &total_weight)
Definition: Functions.h:12511
const at::Tensor & resize_as_sparse_outf(const at::Tensor &self, const at::Tensor &the_template, const at::Tensor &out)
Definition: Functions.h:23334
at::Tensor replication_pad2d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14213
at::Tensor & ldexp_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:4007
at::Tensor & mvlgamma_out(at::Tensor &out, const at::Tensor &self, int64_t p)
Definition: Functions.h:4928
void _foreach_log2_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24799
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_batch_norm_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_out, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, const c10::optional< at::Tensor > &save_mean, const c10::optional< at::Tensor > &save_invstd, bool train, double eps, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:22061
at::Tensor & new_zeros_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:21016
at::Tensor & asin_(at::Tensor &self)
Definition: Functions.h:944
inline ::std::tuple< at::Tensor, at::Tensor > _unique(const at::Tensor &self, bool sorted=true, bool return_inverse=false)
Definition: Functions.h:7934
at::Tensor & new_ones_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:21049
at::Tensor glu_backward(const at::Tensor &grad_output, const at::Tensor &self, int64_t dim)
Definition: Functions.h:12915
at::Tensor & upsample_nearest2d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15973
at::Tensor soft_margin_loss_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction)
Definition: Functions.h:12854
at::Tensor & fmod_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10900
at::Tensor replication_pad1d_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14158
at::Tensor & repeat_interleave_out(at::Tensor &out, const at::Tensor &repeats, c10::optional< int64_t > output_size=c10::nullopt)
Definition: Functions.h:22505
at::Tensor & t_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25730
at::Tensor erfc(const at::Tensor &self)
Definition: Functions.h:2947
at::Tensor & _sparse_addmm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:23385
const at::Tensor & resize_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, c10::optional< at::MemoryFormat > memory_format, const at::Tensor &out)
Definition: Functions.h:21122
at::Tensor rot90(const at::Tensor &self, int64_t k=1, at::IntArrayRef dims={0, 1})
Definition: Functions.h:7821
at::Tensor & convolution_symint_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups)
Definition: Functions.h:20281
at::Tensor replication_pad3d(const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14345
at::Tensor & atanh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:867
at::Tensor & bitwise_left_shift_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:9753
at::Tensor bitwise_and(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9623
at::Tensor upsample_linear1d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:14873
inline ::std::tuple< at::Tensor, at::Tensor > _sobol_engine_draw(const at::Tensor &quasi, int64_t n, const at::Tensor &sobolstate, int64_t dimension, int64_t num_generated, c10::optional< at::ScalarType > dtype)
Definition: Functions.h:283
at::Tensor & sgn_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:419
at::Tensor _cast_Byte(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:83
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_overrideable_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:20356
at::Tensor & q_per_channel_zero_points_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23696
at::Tensor fft_ifftn(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17665
at::Tensor & _coalesced_out(at::Tensor &out, const at::Tensor &self, bool coalesced)
Definition: Functions.h:23511
at::Tensor & max_unpool3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:13627
at::Tensor & log_sigmoid_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:13053
at::Tensor _is_all_true(const at::Tensor &self)
Definition: Functions.h:627
at::Tensor upsample_bicubic2d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15269
at::Tensor as_strided_scatter_symint(const at::Tensor &self, const at::Tensor &src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset=c10::nullopt)
Definition: Functions.h:7140
at::Tensor & mean_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:4536
at::Tensor & glu_jvp_out(at::Tensor &out, const at::Tensor &glu, const at::Tensor &x, const at::Tensor &dx, int64_t dim)
Definition: Functions.h:25002
at::Tensor & gather_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, bool sparse_grad, at::Tensor &out)
Definition: Functions.h:10368
at::Tensor & log_(at::Tensor &self)
Definition: Functions.h:4035
at::Tensor & quantized_max_pool1d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out)
Definition: Functions.h:21731
at::Tensor & smooth_l1_loss_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, double beta, at::Tensor &grad_input)
Definition: Functions.h:12793
at::Tensor _test_string_default(const at::Tensor &dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\")
Definition: Functions.h:18605
void _linalg_check_errors(const at::Tensor &info, c10::string_view api_name, bool is_matrix)
Definition: Functions.h:10466
at::Tensor & cat_out(at::Tensor &out, const at::ITensorListRef &tensors, int64_t dim=0)
Definition: Functions.h:1326
at::Tensor trunc(const at::Tensor &self)
Definition: Functions.h:7891
at::Tensor std(const at::Tensor &self, bool unbiased)
Definition: Functions.h:7562
at::Tensor & diagonal_copy_out(at::Tensor &out, const at::Tensor &self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1)
Definition: Functions.h:25500
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_lu_factor_out(at::Tensor &LU, at::Tensor &pivots, const at::Tensor &A, bool pivot=true)
Definition: Functions.h:17828
const at::Tensor & sparse_resize_and_clear_out(const at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)
Definition: Functions.h:23470
at::Tensor _sparse_log_softmax_backward_data(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, const at::Tensor &self)
Definition: Functions.h:8361
at::Tensor & _conj_physical_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:20088
at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19837
at::Tensor nuclear_norm(const at::Tensor &self, bool keepdim=false)
Definition: Functions.h:8465
at::Tensor empty(at::IntArrayRef size, c10::optional< at::DimnameList > names, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:2592
at::Tensor & complex_outf(const at::Tensor &real, const at::Tensor &imag, at::Tensor &out)
Definition: Functions.h:1668
at::Tensor & _test_warn_in_autograd_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25343
at::Tensor & i0_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10734
at::Tensor & reflection_pad1d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &out)
Definition: Functions.h:13674
at::Tensor & _cudnn_rnn_flatten_weight_symint_out(at::Tensor &out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional)
Definition: Functions.h:19929
at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15236
at::Tensor sparse_compressed_tensor(const at::Tensor &compressed_indices, const at::Tensor &plain_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options)
Definition: Functions.h:8637
at::Tensor & linalg_cond_out(at::Tensor &out, const at::Tensor &self, const c10::optional< at::Scalar > &p=c10::nullopt)
Definition: Functions.h:18333
at::Tensor & bincount_outf(const at::Tensor &self, const c10::optional< at::Tensor > &weights, int64_t minlength, at::Tensor &out)
Definition: Functions.h:20183
at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19711
at::Tensor _reshape_alias_copy(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:18764
at::Tensor & le_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10079
void _foreach_expm1_(at::TensorList self)
Definition: Functions.h:11938
at::Tensor trace_backward(const at::Tensor &grad, at::IntArrayRef sizes)
Definition: Functions.h:9913
inline ::std::tuple< at::Tensor, at::Tensor > std_mean(const at::Tensor &self, bool unbiased)
Definition: Functions.h:7577
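`std_mean` returns both statistics from a single pass over the input; for example:

at::Tensor x = at::randn({1000});
auto [s, m] = at::std_mean(x, /*unbiased=*/true);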
at::Tensor & zero_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23348
at::Tensor uniform(const at::Tensor &self, double from=0, double to=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24189
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, int64_t, int64_t, int64_t, int64_t, at::Tensor > _scaled_dot_product_flash_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false)
Definition: Functions.h:19137
at::Tensor & linalg_lu_solve_out(at::Tensor &out, const at::Tensor &LU, const at::Tensor &pivots, const at::Tensor &B, bool left=true, bool adjoint=false)
Definition: Functions.h:17870
at::Tensor unfold_copy(const at::Tensor &self, int64_t dimension, int64_t size, int64_t step)
Definition: Functions.h:19078
void _foreach_acos_(at::TensorList self)
Definition: Functions.h:11858
at::Tensor matrix_exp_backward(const at::Tensor &self, const at::Tensor &grad)
Definition: Functions.h:4364
at::Tensor logical_and(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1229
at::Tensor & arcsin_(at::Tensor &self)
Definition: Functions.h:963
at::Tensor & new_empty_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:20873
at::Tensor & linalg_inv_outf(const at::Tensor &A, at::Tensor &out)
Definition: Functions.h:18155
at::Tensor fmod(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10909
inline ::std::tuple< at::Tensor &, at::Tensor & > geqrf_out(at::Tensor &a, at::Tensor &tau, const at::Tensor &self)
Definition: Functions.h:10575
at::Tensor _saturate_weight_to_fp16(const at::Tensor &weight)
Definition: Functions.h:9135
inline ::std::tuple< at::Tensor &, at::Tensor & > batch_norm_gather_stats_with_counts_outf(const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum, double eps, const at::Tensor &counts, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:22056
at::Tensor & _adaptive_avg_pool2d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:25080
at::Tensor & log10_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:4059
at::Tensor fft_fftfreq(int64_t n, double d=1.0, at::TensorOptions options={})
Definition: Functions.h:17735
at::Tensor & mkldnn_max_pool3d_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:21718
at::Tensor & special_laguerre_polynomial_l_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19543
at::Tensor & _foobar_out(at::Tensor &out, const at::Tensor &self, bool arg1=true, bool arg2=true, bool arg3=true)
Definition: Functions.h:25998
at::Tensor & outer_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &vec2)
Definition: Functions.h:18193
at::Tensor _test_serialization_subcmul(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:18585
at::Tensor float_power(const at::Tensor &self, const at::Tensor &exponent)
Definition: Functions.h:11395
at::Tensor unfold_backward(const at::Tensor &grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step)
Definition: Functions.h:11317
at::Tensor & bitwise_right_shift_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:9796
at::Tensor _mkldnn_reshape(const at::Tensor &self, at::IntArrayRef shape)
Definition: Functions.h:6600
inline ::std::tuple< at::Tensor, at::Tensor > cummin(const at::Tensor &self, int64_t dim)
Definition: Functions.h:2043
at::Tensor & le_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10075
at::Tensor & index_reduce_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source, c10::string_view reduce, bool include_self, at::Tensor &out)
Definition: Functions.h:9476
at::Tensor constant_pad_nd_symint(const at::Tensor &self, c10::SymIntArrayRef pad, const at::Scalar &value=0)
Definition: Functions.h:1698
at::Tensor batch_norm_backward_elemt(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &weight, const at::Tensor &mean_dy, const at::Tensor &mean_dy_xmu, const at::Tensor &count)
Definition: Functions.h:5133
at::Tensor & nextafter_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:10974
at::Tensor & _reshape_alias_copy_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride)
Definition: Functions.h:25584
at::Tensor special_gammaincc(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17459
at::Tensor & __rshift___outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:24115
const at::Tensor & _conv_depthwise2d_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, const at::Tensor &out)
Definition: Functions.h:16596
at::Tensor & _values_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25766
inline ::std::tuple< at::Tensor &, at::Tensor & > matmul_backward_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &grad, const at::Tensor &self, const at::Tensor &other, ::std::array< bool, 2 > mask)
Definition: Functions.h:21655
at::Tensor & glu_outf(const at::Tensor &self, int64_t dim, at::Tensor &out)
Definition: Functions.h:12896
at::Tensor _values_copy(const at::Tensor &self)
Definition: Functions.h:18914
at::Tensor dist(const at::Tensor &self, const at::Tensor &other, const at::Scalar &p=2)
Definition: Functions.h:10767
at::Tensor log2(const at::Tensor &self)
Definition: Functions.h:4087
at::Tensor & clamp_min_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &min)
Definition: Functions.h:1598
at::Tensor sqrt(const at::Tensor &self)
Definition: Functions.h:7524
bool __dispatch_is_neg(const at::Tensor &self)
Definition: Functions.h:3752
at::Tensor & _mkldnn_transpose_(at::Tensor &self, int64_t dim0, int64_t dim1)
Definition: Functions.h:7791
at::Tensor & special_logsumexp_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false)
Definition: Functions.h:17366
at::Tensor & nan_to_num_out(at::Tensor &out, const at::Tensor &self, c10::optional< double > nan=c10::nullopt, c10::optional< double > posinf=c10::nullopt, c10::optional< double > neginf=c10::nullopt)
Definition: Functions.h:3905
at::Tensor & _convert_indices_from_coo_to_csr_out(at::Tensor &out, const at::Tensor &self, int64_t size, bool out_int32=false)
Definition: Functions.h:12231
at::Tensor col2im_symint(const at::Tensor &self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16882
at::Tensor & narrow_copy_outf(const at::Tensor &self, int64_t dim, int64_t start, int64_t length, at::Tensor &out)
Definition: Functions.h:4975
at::Tensor _sample_dirichlet(const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:8266
at::Tensor & sqrt_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:7534
at::Tensor & special_chebyshev_polynomial_u_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19329
at::Tensor & quantize_per_tensor_out(at::Tensor &out, const at::Tensor &self, double scale, int64_t zero_point, at::ScalarType dtype)
Definition: Functions.h:23629
at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19749
at::Tensor upsample_trilinear3d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15544
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > mps_convolution_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21772
at::Tensor & round_(at::Tensor &self)
Definition: Functions.h:6610
at::Tensor reshape(const at::Tensor &self, at::IntArrayRef shape)
Definition: Functions.h:6534
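`reshape` returns a view when the existing strides permit and a copy otherwise; e.g.:

at::Tensor x = at::arange(12);
at::Tensor y = at::reshape(x, {3, 4});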
at::Tensor & relu6_(at::Tensor &self)
Definition: Functions.h:6668
at::Tensor norm(const at::Tensor &self, const c10::optional< at::Scalar > &p, at::ScalarType dtype)
Definition: Functions.h:8371
at::Tensor & put_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &index, const at::Tensor &source, bool accumulate=false)
Definition: Functions.h:24030
at::Tensor & _pin_memory_outf(const at::Tensor &self, c10::optional< at::Device > device, at::Tensor &out)
Definition: Functions.h:22235
at::Tensor & hann_window_outf(int64_t window_length, at::Tensor &out)
Definition: Functions.h:21325
at::Tensor & matrix_power_out(at::Tensor &out, const at::Tensor &self, int64_t n)
Definition: Functions.h:4350
at::Tensor & grid_sampler_2d_outf(const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor &out)
Definition: Functions.h:21280
at::Tensor & triu_indices_out(at::Tensor &out, int64_t row, int64_t col, int64_t offset=0)
Definition: Functions.h:24259
void _foreach_trunc_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24898
at::Tensor take_along_dim(const at::Tensor &self, const at::Tensor &indices, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:10266
at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, at::Tensor &out)
Definition: Functions.h:13320
at::Tensor & slice_copy_outf(const at::Tensor &self, int64_t dim, c10::optional< int64_t > start, c10::optional< int64_t > end, int64_t step, at::Tensor &out)
Definition: Functions.h:25670
inline ::std::tuple< at::Tensor &, at::Tensor & > matmul_backward_outf(const at::Tensor &grad, const at::Tensor &self, const at::Tensor &other, ::std::array< bool, 2 > mask, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:21659
void _foreach_tanh_(at::TensorList self)
Definition: Functions.h:12018
at::Tensor & negative_(at::Tensor &self)
Definition: Functions.h:6474
at::Tensor & special_gammainc_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:17440
at::Tensor & amax_outf(const at::Tensor &self, at::IntArrayRef dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:4466
const at::Tensor & _conv_depthwise2d_out(const at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16563
at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:5625
at::Tensor hardsigmoid(const at::Tensor &self)
Definition: Functions.h:12939
at::Tensor & ge_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10019
at::Tensor & mish_(at::Tensor &self)
Definition: Functions.h:6902
const at::Tensor & resize_out(const at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21089
at::Tensor & mkldnn_convolution_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:21803
at::Tensor & log_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:4044
inline ::std::tuple< at::Tensor, at::Tensor > _native_multi_head_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional< int64_t > mask_type=c10::nullopt)
Definition: Functions.h:19112
at::Tensor & _sobol_engine_ff_(at::Tensor &self, int64_t n, const at::Tensor &sobolstate, int64_t dimension, int64_t num_generated)
Definition: Functions.h:288
at::Tensor & scatter_add_outf(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src, at::Tensor &out)
Definition: Functions.h:9581
inline ::std::tuple< at::Tensor &, at::Tensor & > triangular_solve_outf(const at::Tensor &self, const at::Tensor &A, bool upper, bool transpose, bool unitriangular, at::Tensor &X, at::Tensor &M)
Definition: Functions.h:10456
void _foreach_asin_(at::TensorList self)
Definition: Functions.h:11868
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > convolution_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:20336
at::Tensor & _empty_affine_quantized_outf(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:21075
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > _thnn_differentiable_lstm_cell_backward(const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, const at::Tensor &input_gates, const at::Tensor &hidden_gates, const c10::optional< at::Tensor > &input_bias, const c10::optional< at::Tensor > &hidden_bias, const at::Tensor &cx, const at::Tensor &cy)
Definition: Functions.h:9234
at::Tensor & bmm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat2)
Definition: Functions.h:1280
at::Tensor & diag_embed_out(at::Tensor &out, const at::Tensor &self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1)
Definition: Functions.h:20598
at::Tensor & as_strided_scatter_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset=c10::nullopt)
Definition: Functions.h:22748
at::Tensor max_pool2d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4486
at::Tensor & digamma_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:10678
inline ::std::tuple< at::Tensor &, at::Tensor & > median_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:4583
at::Tensor acos(const at::Tensor &self)
Definition: Functions.h:483
at::Tensor reflection_pad2d_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13894
at::Tensor & special_erfc_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17100
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_backward_outf(const at::Tensor &grad_out, const at::Tensor &input, at::IntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21577
at::Tensor & tril_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor &out)
Definition: Functions.h:24254
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > mkldnn_rnn_layer_backward(const at::Tensor &input, const at::Tensor &weight1, const at::Tensor &weight2, const at::Tensor &weight3, const at::Tensor &weight4, const at::Tensor &hx_, const at::Tensor &cx_tmp, const at::Tensor &output, const at::Tensor &hy_, const at::Tensor &cy_, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor &workspace)
Definition: Functions.h:4718
at::Tensor & hardswish_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:25020
at::Tensor & isin_out(at::Tensor &out, const at::Tensor &elements, const at::Tensor &test_elements, bool assume_unique=false, bool invert=false)
Definition: Functions.h:3680
at::Tensor & upsample_nearest3d_outf(const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:16094
void _foreach_addcdiv_outf(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value, at::TensorList out)
Definition: Functions.h:24907
inline ::std::tuple< at::Tensor, at::Tensor > _aminmax(const at::Tensor &self)
Definition: Functions.h:4369
at::Tensor _log_softmax(const at::Tensor &self, int64_t dim, bool half_to_float)
Definition: Functions.h:4223
bool _has_same_storage_numel(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:138
at::Tensor & to_sparse_out(at::Tensor &out, const at::Tensor &self, int64_t sparse_dim)
Definition: Functions.h:23539
at::Tensor & special_polygamma_outf(int64_t n, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17356
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _unique2(const at::Tensor &self, bool sorted=true, bool return_inverse=false, bool return_counts=false)
Definition: Functions.h:7954
at::Tensor movedim(const at::Tensor &self, at::IntArrayRef source, at::IntArrayRef destination)
Definition: Functions.h:5331
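Usage sketch for at::movedim (movedim_demo is a hypothetical name):
#include <ATen/ATen.h>
void movedim_demo() {
  at::Tensor x = at::rand({2, 3, 4});
  at::Tensor y = at::movedim(x, {0}, {2});  // dim 0 moves to position 2: shape [3, 4, 2]
}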
at::Tensor upsample_trilinear3d(const at::Tensor &input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14565
bool is_same_size(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3767
at::Tensor & cosh_(at::Tensor &self)
Definition: Functions.h:1911
at::Tensor & any_out(at::Tensor &out, const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:680
at::Tensor & mv_outf(const at::Tensor &self, const at::Tensor &vec, at::Tensor &out)
Definition: Functions.h:4923
at::Tensor & _masked_softmax_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &mask, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:24021
at::Tensor huber_loss(const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean, double delta=1.0)
Definition: Functions.h:12812
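Sketch for at::huber_loss with the defaults shown above (huber_demo is a hypothetical name):
#include <ATen/ATen.h>
void huber_demo() {
  at::Tensor pred = at::rand({5});
  at::Tensor target = at::rand({5});
  at::Tensor loss = at::huber_loss(pred, target);  // mean reduction, delta = 1.0
}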
inline ::std::tuple< at::Tensor, at::Tensor > linalg_eig(const at::Tensor &self)
Definition: Functions.h:18048
at::Tensor & _add_relu_outf(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:564
at::Tensor & crow_indices_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25793
at::Tensor & _dirichlet_grad_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &alpha, const at::Tensor &total)
Definition: Functions.h:23154
at::Tensor sparse_resize_and_clear(const at::Tensor &self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)
Definition: Functions.h:23479
at::Tensor & special_sinc_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17394
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_ldl_factor_ex_outf(const at::Tensor &self, bool hermitian, bool check_errors, at::Tensor &LD, at::Tensor &pivots, at::Tensor &info)
Definition: Functions.h:17921
at::Tensor & slow_conv_dilated3d_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor &out)
Definition: Functions.h:25283
at::Tensor & slow_conv3d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0)
Definition: Functions.h:16651
void _foreach_tan_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24817
at::Tensor cdist(const at::Tensor &x1, const at::Tensor &x2, double p=2, c10::optional< int64_t > compute_mode=c10::nullopt)
Definition: Functions.h:5286
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps)
Definition: Functions.h:21402
at::Tensor & linear_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::Tensor &out)
Definition: Functions.h:3928
at::Tensor & embedding_symint_outf(const at::Tensor &weight, const at::Tensor &indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor &out)
Definition: Functions.h:20702
at::Tensor miopen_convolution_transpose_symint(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:4766
at::Tensor trapz(const at::Tensor &y, const at::Tensor &x, int64_t dim=-1)
Definition: Functions.h:7836
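Sketch for at::trapz, trapezoidal integration along the last dimension (trapz_demo is a hypothetical name):
#include <ATen/ATen.h>
void trapz_demo() {
  at::Tensor x = at::arange(5).to(at::kFloat);  // sample points 0..4
  at::Tensor y = x * x;                         // y = x^2 at those points
  at::Tensor area = at::trapz(y, x);            // trapezoid rule yields 22.0 here
}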
at::Tensor mkldnn_max_pool2d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4491
at::Tensor & special_bessel_j0_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:19221
at::Tensor & fft_rfftfreq_out(at::Tensor &out, int64_t n, double d=1.0)
Definition: Functions.h:17762
at::Tensor avg_pool3d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override)
Definition: Functions.h:13496
at::Tensor view_as_complex(const at::Tensor &self)
Definition: Functions.h:405
at::Tensor & special_ndtri_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:16988
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > mkldnn_linear_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21646
at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor &crow_indices, const at::Tensor &col_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8736
inline ::std::vector< at::Tensor > _foreach_sinh(at::TensorList self)
Definition: Functions.h:12033
at::Tensor & clip_out(at::Tensor &out, const at::Tensor &self, const c10::optional< at::Scalar > &min, const c10::optional< at::Scalar > &max=c10::nullopt)
Definition: Functions.h:1636
at::Tensor & zeros_like_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:23127
at::Tensor soft_margin_loss(const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12840
at::Tensor scalar_tensor(const at::Scalar &s, at::TensorOptions options={})
Definition: Functions.h:5429
void unsafe_split_with_sizes_symint_out(at::TensorList out, const at::Tensor &self, c10::SymIntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:22836
at::Tensor & sqrt_(at::Tensor &self)
Definition: Functions.h:7529
at::Tensor & _make_per_tensor_quantized_tensor_out(at::Tensor &out, const at::Tensor &self, double scale, int64_t zero_point)
Definition: Functions.h:23710
at::Tensor & _spdiags_out(at::Tensor &out, const at::Tensor &diagonals, const at::Tensor &offsets, at::IntArrayRef shape, c10::optional< at::Layout > layout=c10::nullopt)
Definition: Functions.h:23280
inline ::std::tuple< at::Tensor &, at::Tensor & > cummax_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t dim)
Definition: Functions.h:2015
void _foreach_zero_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24646
at::Tensor & copy_outf(const at::Tensor &self, const at::Tensor &src, bool non_blocking, at::Tensor &out)
Definition: Functions.h:20422
at::Tensor & _make_per_channel_quantized_tensor_outf(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, int64_t axis, at::Tensor &out)
Definition: Functions.h:23723
at::Tensor special_laguerre_polynomial_l(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19524
at::Tensor & put_outf(const at::Tensor &self, const at::Tensor &index, const at::Tensor &source, bool accumulate, at::Tensor &out)
Definition: Functions.h:24034
at::Tensor & _nested_tensor_from_tensor_list_out(at::Tensor &out, at::TensorList list, c10::optional< at::ScalarType > dtype=c10::nullopt, c10::optional< at::Layout > layout=c10::nullopt, c10::optional< at::Device > device=c10::nullopt, c10::optional< bool > pin_memory=c10::nullopt)
Definition: Functions.h:25384
at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19291
at::Tensor & reflection_pad3d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:13927
void _foreach_cosh_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24727
at::Tensor _masked_softmax_backward(const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &mask, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:9429
at::Tensor rrelu_with_noise_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &noise, const at::Scalar &lower, const at::Scalar &upper, bool training, bool self_is_result)
Definition: Functions.h:13109
at::Tensor lu_solve(const at::Tensor &self, const at::Tensor &LU_data, const at::Tensor &LU_pivots)
Definition: Functions.h:10631
at::Tensor & _upsample_nearest_exact3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16127
at::Tensor & poisson_outf(const at::Tensor &self, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:23176
at::Tensor _masked_softmax(const at::Tensor &self, const at::Tensor &mask, c10::optional< int64_t > dim=c10::nullopt, c10::optional< int64_t > mask_type=c10::nullopt)
Definition: Functions.h:9424
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > native_layer_norm_backward_symint(const at::Tensor &grad_out, const at::Tensor &input, c10::SymIntArrayRef normalized_shape, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:3884
void _foreach_minimum_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24484
at::Tensor index(const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices)
Definition: Functions.h:3622
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > linalg_lstsq_outf(const at::Tensor &self, const at::Tensor &b, c10::optional< double > rcond, c10::optional< c10::string_view > driver, at::Tensor &solution, at::Tensor &residuals, at::Tensor &rank, at::Tensor &singular_values)
Definition: Functions.h:17963
at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::Tensor &grad_input)
Definition: Functions.h:13379
at::Tensor & select_scatter_symint_outf(const at::Tensor &self, const at::Tensor &src, int64_t dim, c10::SymInt index, at::Tensor &out)
Definition: Functions.h:22706
at::Tensor q_per_channel_scales(const at::Tensor &self)
Definition: Functions.h:9030
at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:13375
at::Tensor _triton_multi_head_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask={})
Definition: Functions.h:19187
at::Tensor & argmax_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false)
Definition: Functions.h:758
at::Tensor hypot(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10937
at::Tensor & clamp_max_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &max)
Definition: Functions.h:1560
at::Tensor multi_margin_loss(const at::Tensor &self, const at::Tensor &target, const at::Scalar &p=1, const at::Scalar &margin=1, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12296
bool is_nonzero(const at::Tensor &self)
Definition: Functions.h:3762
void _foreach_neg_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24808
at::Tensor adaptive_avg_pool1d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:526
at::Tensor & nanmean_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:4568
at::Tensor & col2im_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride)
Definition: Functions.h:16827
at::Tensor nll_loss2d(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100)
Definition: Functions.h:12621
void _foreach_round_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24849
at::Tensor & cholesky_inverse_outf(const at::Tensor &self, bool upper, at::Tensor &out)
Definition: Functions.h:10556
at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15313
at::Tensor & glu_backward_jvp_outf(const at::Tensor &grad_x, const at::Tensor &grad_glu, const at::Tensor &x, const at::Tensor &dgrad_glu, const at::Tensor &dx, int64_t dim, at::Tensor &out)
Definition: Functions.h:25015
at::Tensor & negative_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6483
at::Tensor & polygamma_out(at::Tensor &out, int64_t n, const at::Tensor &self)
Definition: Functions.h:10692
at::Tensor threshold(const at::Tensor &self, const at::Scalar &threshold, const at::Scalar &value)
Definition: Functions.h:7738
inline ::std::vector< at::Tensor > _foreach_acos(at::TensorList self)
Definition: Functions.h:11853
at::Tensor affine_grid_generator(const at::Tensor &theta, at::IntArrayRef size, bool align_corners)
Definition: Functions.h:617
at::Tensor & atan_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:987
at::Tensor gelu(const at::Tensor &self, c10::string_view approximate="none")
Definition: Functions.h:6702
at::Tensor & special_modified_bessel_i0_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19617
at::Tensor & transpose_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim0, int64_t dim1)
Definition: Functions.h:25739
at::Tensor & gather_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, bool sparse_grad=false)
Definition: Functions.h:10364
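Sketch for the out variant of gather (gather_demo is a hypothetical name; at::tensor comes from <ATen/ops/tensor.h>, which this header already pulls in):
#include <ATen/ATen.h>
void gather_demo() {
  at::Tensor src = at::arange(6).reshape({2, 3});
  at::Tensor idx = at::tensor({2, 0, 1, 1}, at::kLong).reshape({2, 2});
  at::Tensor out = at::empty({2, 2}, src.options());
  at::gather_out(out, src, /*dim=*/1, idx);  // out[i][j] = src[i][idx[i][j]]
}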
at::Tensor cauchy(const at::Tensor &self, double median=0, double sigma=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24203
at::Tensor isnan(const at::Tensor &self)
Definition: Functions.h:3722
at::Tensor & permute_copy_outf(const at::Tensor &self, at::IntArrayRef dims, at::Tensor &out)
Definition: Functions.h:25557
at::Tensor & softshrink_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &lambd, at::Tensor &grad_input)
Definition: Functions.h:13165
at::Tensor & rsub_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:23367
at::Tensor & nll_loss_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &out)
Definition: Functions.h:12368
at::Tensor & diagonal_backward_outf(const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor &out)
Definition: Functions.h:20618
at::Tensor & empty_symint_outf(c10::SymIntArrayRef size, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:2864
at::Tensor & linalg_lu_solve_outf(const at::Tensor &LU, const at::Tensor &pivots, const at::Tensor &B, bool left, bool adjoint, at::Tensor &out)
Definition: Functions.h:17874
at::Tensor & special_modified_bessel_k0_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:19645
at::Tensor & slow_conv_dilated2d_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:25228
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > miopen_rnn_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:21993
at::Tensor & nll_loss2d_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, int64_t ignore_index, at::Tensor &out)
Definition: Functions.h:12588
at::Tensor norm_except_dim(const at::Tensor &v, int64_t pow=2, int64_t dim=0)
Definition: Functions.h:8111
at::Tensor argsort(const at::Tensor &self, int64_t dim=-1, bool descending=false)
Definition: Functions.h:11246
at::Tensor _slow_conv2d_forward(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:16544
at::Tensor & rrelu_with_noise_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &noise, const at::Scalar &lower=0.125, const at::Scalar &upper=0.3333333333333333, bool training=false, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:13095
at::Tensor & _sparse_softmax_backward_data_outf(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23257
at::Tensor & segment_reduce_out(at::Tensor &out, const at::Tensor &data, c10::string_view reduce, const c10::optional< at::Tensor > &lengths={}, const c10::optional< at::Tensor > &indices={}, const c10::optional< at::Tensor > &offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional< at::Scalar > &initial=c10::nullopt)
Definition: Functions.h:25366
at::Tensor & select_backward_out(at::Tensor &out, const at::Tensor &grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index)
Definition: Functions.h:22532
at::Tensor & _sparse_csr_prod_outf(const at::Tensor &self, at::IntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:23239
inline ::std::tuple< at::Tensor &, at::Tensor & > kthvalue_out(at::Tensor &values, at::Tensor &indices, const at::Tensor &self, int64_t k, int64_t dim=-1, bool keepdim=false)
Definition: Functions.h:3806
at::Tensor & trace_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:24268
at::Tensor & diagonal_scatter_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1)
Definition: Functions.h:22717
at::Tensor & mkldnn_linear_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::Tensor &out)
Definition: Functions.h:21623
inline ::std::tuple< at::Tensor &, at::Tensor & > grid_sampler_2d_backward_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array< bool, 2 > output_mask)
Definition: Functions.h:21285
at::Tensor & angle_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:395
at::Tensor & glu_backward_jvp_out(at::Tensor &out, const at::Tensor &grad_x, const at::Tensor &grad_glu, const at::Tensor &x, const at::Tensor &dgrad_glu, const at::Tensor &dx, int64_t dim)
Definition: Functions.h:25011
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > miopen_rnn_outf(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4)
Definition: Functions.h:21997
at::Tensor & logical_and_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:1234
at::Tensor & floor_divide_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:3151
at::Tensor alpha_dropout(const at::Tensor &input, double p, bool train)
Definition: Functions.h:333
at::Tensor max_unpool2d(const at::Tensor &self, const at::Tensor &indices, at::IntArrayRef output_size)
Definition: Functions.h:13622
at::Tensor erf(const at::Tensor &self)
Definition: Functions.h:2928
at::Tensor reshape_symint(const at::Tensor &self, c10::SymIntArrayRef shape)
Definition: Functions.h:6545
at::Tensor softshrink_backward(const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &lambd)
Definition: Functions.h:13170
at::Tensor special_chebyshev_polynomial_w(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19398
at::Tensor & normal_symint_outf(double mean, double std, c10::SymIntArrayRef size, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:11552
at::Tensor & linalg_vector_norm_outf(const at::Tensor &self, const at::Scalar &ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:18253
at::Tensor _nested_tensor_from_mask(const at::Tensor &t, const at::Tensor &mask, bool mask_check=true)
Definition: Functions.h:7851
at::Tensor & narrow_copy_symint_outf(const at::Tensor &self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor &out)
Definition: Functions.h:4997
at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16149
at::Tensor & empty_strided_symint_outf(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor &out)
Definition: Functions.h:21220
at::Tensor & nll_loss2d_symint_outf(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor &out)
Definition: Functions.h:12610
at::Tensor & slice_scatter_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &src, int64_t dim=0, c10::optional< c10::SymInt > start=c10::nullopt, c10::optional< c10::SymInt > end=c10::nullopt, c10::SymInt step=1)
Definition: Functions.h:22651
at::Tensor & tensordot_outf(const at::Tensor &self, const at::Tensor &other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor &out)
Definition: Functions.h:7733
at::Tensor & clone_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:23307
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _thnn_fused_lstm_cell_outf(const at::Tensor &input_gates, const at::Tensor &hidden_gates, const at::Tensor &cx, const c10::optional< at::Tensor > &input_bias, const c10::optional< at::Tensor > &hidden_bias, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:23818
void _assert_async(const at::Tensor &self)
Definition: Functions.h:148
at::Tensor & cos_(at::Tensor &self)
Definition: Functions.h:1892
at::Tensor count_nonzero(const at::Tensor &self, at::IntArrayRef dim)
Definition: Functions.h:1930
at::Tensor upsample_linear1d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14510
at::Tensor & linalg_pinv_outf(const at::Tensor &self, const c10::optional< at::Tensor > &atol, const c10::optional< at::Tensor > &rtol, bool hermitian, at::Tensor &out)
Definition: Functions.h:18365
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _transformer_decoder_only_layer_fwd_outf(const at::Tensor &src, int64_t embed_dim, int64_t num_heads, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor &norm_weight_1, const at::Tensor &norm_bias_1, const at::Tensor &norm_weight_2, const at::Tensor &norm_bias_2, const at::Tensor &ffn_weight_1, const at::Tensor &ffn_bias_1, const at::Tensor &ffn_weight_2, const at::Tensor &ffn_bias_2, const c10::optional< at::Tensor > &mask, const c10::optional< at::Tensor > &incr_key, const c10::optional< at::Tensor > &incr_value, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:25984
at::Tensor index_reduce(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &source, c10::string_view reduce, bool include_self=true)
Definition: Functions.h:9481
at::Tensor special_erfcx(const at::Tensor &self)
Definition: Functions.h:17109
at::Tensor matmul(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4326
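Sketch for at::matmul (matmul_demo is a hypothetical name):
#include <ATen/ATen.h>
void matmul_demo() {
  at::Tensor a = at::rand({2, 3});
  at::Tensor b = at::rand({3, 4});
  at::Tensor c = at::matmul(a, b);  // shape [2, 4]; batch dims broadcast for >2-D inputs
}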
at::Tensor & expm1_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:3014
inline ::std::vector< at::Tensor > _foreach_cosh(at::TensorList self)
Definition: Functions.h:11903
at::Tensor & gt_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10131
at::Tensor & linalg_matrix_exp_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25307
at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:16411
bool _nnpack_available()
Definition: Functions.h:5148
at::Tensor pairwise_distance(const at::Tensor &x1, const at::Tensor &x2, double p=2, double eps=1e-06, bool keepdim=false)
Definition: Functions.h:5281
at::Tensor & igammac_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:10960
at::Tensor & divide_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:2325
inline ::std::tuple< at::Tensor, at::Tensor > linalg_eigh(const at::Tensor &self, c10::string_view UPLO="L")
Definition: Functions.h:18090
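Sketch for at::linalg_eigh on a symmetrized input (eigh_demo is a hypothetical name; the structured binding assumes C++17):
#include <ATen/ATen.h>
void eigh_demo() {
  at::Tensor a = at::rand({3, 3});
  at::Tensor sym = a + a.transpose(0, 1);            // make the input symmetric
  auto [w, v] = at::linalg_eigh(sym, /*UPLO=*/"L");  // eigenvalues w, eigenvectors v
}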
at::Tensor & upsample_nearest1d_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:15588
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _linalg_det_outf(const at::Tensor &A, at::Tensor &result, at::Tensor &LU, at::Tensor &pivots)
Definition: Functions.h:17888
at::Tensor & log1p_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:4082
at::Tensor & to_padded_tensor_out(at::Tensor &out, const at::Tensor &self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt)
Definition: Functions.h:25900
at::Tensor & ne_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9935
at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15753
void _foreach_sigmoid_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24889
at::Tensor nll_loss2d_symint(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100)
Definition: Functions.h:12632
inline ::std::tuple< at::Tensor, at::Tensor > _scaled_dot_product_efficient_attention(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, bool compute_log_sumexp, bool is_causal=false)
Definition: Functions.h:19147
at::Tensor sum(const at::Tensor &self, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:7472
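Sketch of the dtype argument to at::sum (sum_demo is a hypothetical name):
#include <ATen/ATen.h>
void sum_demo() {
  at::Tensor x = at::ones({4}, at::kInt);
  at::Tensor s = at::sum(x, at::kLong);  // accumulate in int64 to avoid overflow
}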
at::Tensor & mse_loss_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12254
at::Tensor & replication_pad1d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14059
at::Tensor & frobenius_norm_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false)
Definition: Functions.h:8456
at::Tensor & masked_scatter_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mask, const at::Tensor &source)
Definition: Functions.h:24003
at::Tensor atleast_1d(const at::Tensor &self)
Definition: Functions.h:1015
at::Tensor column_stack(at::TensorList tensors)
Definition: Functions.h:16893
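Sketch for at::column_stack (column_stack_demo is a hypothetical name):
#include <ATen/ATen.h>
void column_stack_demo() {
  at::Tensor a = at::arange(3);             // shape [3]
  at::Tensor b = at::arange(3);
  at::Tensor m = at::column_stack({a, b});  // shape [3, 2]
}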
at::Tensor & select_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor &out)
Definition: Functions.h:22565
at::Tensor & nanquantile_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &q, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear")
Definition: Functions.h:11153
at::Tensor & binomial_out(at::Tensor &out, const at::Tensor &count, const at::Tensor &prob, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:23181
at::Tensor & hardswish_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:13000
at::Tensor & _standard_gamma_grad_outf(const at::Tensor &self, const at::Tensor &output, at::Tensor &out)
Definition: Functions.h:23140
at::Tensor & arange_outf(const at::Scalar &end, at::Tensor &out)
Definition: Functions.h:734
at::Tensor empty_like(const at::Tensor &self, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:2875
at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15390
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _embedding_bag_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional< at::Tensor > &per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1)
Definition: Functions.h:20780
at::Tensor & special_zeta_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:17254
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_backward_symint_outf(const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21479
at::Tensor softshrink(const at::Tensor &self, const at::Scalar &lambd=0.5)
Definition: Functions.h:13156
at::Tensor _cast_Char(const at::Tensor &self, bool non_blocking=false)
Definition: Functions.h:88
at::Tensor rnn_tanh_cell(const at::Tensor &input, const at::Tensor &hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const c10::optional< at::Tensor > &b_ih={}, const c10::optional< at::Tensor > &b_hh={})
Definition: Functions.h:9304
at::Tensor linalg_vecdot(const at::Tensor &x, const at::Tensor &y, int64_t dim=-1)
Definition: Functions.h:17982
at::Tensor _histogramdd_from_bin_cts(const at::Tensor &self, at::IntArrayRef bins, c10::optional< at::ArrayRef< double > > range=c10::nullopt, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:10875
at::Tensor & upsample_linear1d_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:14785
at::Tensor & logcumsumexp_outf(const at::Tensor &self, int64_t dim, at::Tensor &out)
Definition: Functions.h:4274
at::Tensor & _masked_softmax_backward_outf(const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &mask, c10::optional< int64_t > dim, at::Tensor &out)
Definition: Functions.h:24025
at::Tensor & replication_pad1d_outf(const at::Tensor &self, at::IntArrayRef padding, at::Tensor &out)
Definition: Functions.h:14048
at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor &output)
Definition: Functions.h:16750
at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:3348
at::Tensor & new_full_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, const at::Scalar &fill_value, at::Tensor &out)
Definition: Functions.h:20972
at::Tensor _make_dual(const at::Tensor &primal, const at::Tensor &tangent, int64_t level)
Definition: Functions.h:123
at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14004
at::Tensor & _mkldnn_transpose_out(at::Tensor &out, const at::Tensor &self, int64_t dim0, int64_t dim1)
Definition: Functions.h:22885
at::Tensor & repeat_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef repeats)
Definition: Functions.h:22461
void _foreach_exp_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24641
at::Tensor & mul_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:4885
at::Tensor & reflection_pad3d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:13982
at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor &self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &out)
Definition: Functions.h:15324
inline ::std::tuple< at::Tensor &, at::Tensor & > histogram_out(at::Tensor &hist, at::Tensor &bin_edges, const at::Tensor &self, const at::Tensor &bins, const c10::optional< at::Tensor > &weight={}, bool density=false)
Definition: Functions.h:10842
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss_forward_symint_out(at::Tensor &output, at::Tensor &total_weight, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index)
Definition: Functions.h:12467
at::Tensor & _nested_from_padded_and_nested_example_out(at::Tensor &out, const at::Tensor &padded, const at::Tensor &nt_example)
Definition: Functions.h:22966
at::Tensor & linalg_cond_outf(const at::Tensor &self, const c10::optional< at::Scalar > &p, at::Tensor &out)
Definition: Functions.h:18337
at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:5647
at::Tensor & tril_outf(const at::Tensor &self, int64_t diagonal, at::Tensor &out)
Definition: Functions.h:9880
at::Tensor roll(const at::Tensor &self, at::IntArrayRef shifts, at::IntArrayRef dims={})
Definition: Functions.h:7816
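Sketch for at::roll (roll_demo is a hypothetical name):
#include <ATen/ATen.h>
void roll_demo() {
  at::Tensor x = at::arange(5);          // [0, 1, 2, 3, 4]
  at::Tensor y = at::roll(x, {2}, {0});  // [3, 4, 0, 1, 2]
}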
at::Tensor mul(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4876
at::Tensor & logit_out(at::Tensor &out, const at::Tensor &self, c10::optional< double > eps=c10::nullopt)
Definition: Functions.h:6950
at::Tensor _indices_copy(const at::Tensor &self)
Definition: Functions.h:18909
void _foreach_div_outf(at::TensorList self, const at::Scalar &scalar, at::TensorList out)
Definition: Functions.h:24452
at::Tensor _segment_reduce_backward(const at::Tensor &grad, const at::Tensor &output, const at::Tensor &data, c10::string_view reduce, const c10::optional< at::Tensor > &lengths={}, const c10::optional< at::Tensor > &offsets={}, int64_t axis=0, const c10::optional< at::Scalar > &initial=c10::nullopt)
Definition: Functions.h:18650
at::Tensor & fmin_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:11031
at::Tensor & _pdist_forward_out(at::Tensor &out, const at::Tensor &self, double p=2)
Definition: Functions.h:22186
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_batch_norm_outf(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, bool training, double momentum, double eps, at::Tensor &out, at::Tensor &save_mean, at::Tensor &save_invstd)
Definition: Functions.h:5061
inline ::std::tuple<::std::vector< at::Tensor >, at::Tensor > _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor &found_inf, const at::Tensor &inv_scale)
Definition: Functions.h:24402
at::Tensor & slow_conv_dilated3d_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:25272
at::Tensor & randint_like_outf(const at::Tensor &self, int64_t high, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:22350
at::Tensor ormqr(const at::Tensor &self, const at::Tensor &input2, const at::Tensor &input3, bool left=true, bool transpose=false)
Definition: Functions.h:10612
at::Tensor & isin_outf(const at::Tensor &elements, const at::Tensor &test_elements, bool assume_unique, bool invert, at::Tensor &out)
Definition: Functions.h:3684
at::Tensor & range_out(at::Tensor &out, const at::Scalar &start, const at::Scalar &end)
Definition: Functions.h:6408
at::Tensor _test_autograd_multiple_dispatch(const at::Tensor &self)
Definition: Functions.h:18625
void _foreach_clamp_min_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11623
at::Tensor & _pdist_forward_outf(const at::Tensor &self, double p, at::Tensor &out)
Definition: Functions.h:22190
at::Tensor & mkldnn_max_pool2d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:21691
at::Tensor special_bessel_j0(const at::Tensor &self)
Definition: Functions.h:19216
at::Tensor _unsafe_view_symint(const at::Tensor &self, c10::SymIntArrayRef size)
Definition: Functions.h:7970
at::Tensor & _fw_primal_copy_outf(const at::Tensor &self, int64_t level, at::Tensor &out)
Definition: Functions.h:25397
inline ::std::tuple< at::Tensor, at::Tensor > nll_loss2d_forward_symint(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index)
Definition: Functions.h:12698
void _foreach_log1p_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24790
at::Tensor & hardsigmoid_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:12949
at::Tensor & randn_out(at::Tensor &out, at::IntArrayRef size)
Definition: Functions.h:6257
at::Tensor & asinh_(at::Tensor &self)
Definition: Functions.h:824
bool is_vulkan_available()
Definition: Functions.h:5143
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _cudnn_rnn_outf(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4)
Definition: Functions.h:19962
at::Tensor & replication_pad1d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, c10::SymIntArrayRef padding)
Definition: Functions.h:14125
at::Tensor & clamp_min_(at::Tensor &self, const at::Scalar &min)
Definition: Functions.h:1588
at::Tensor & special_airy_ai_outf(const at::Tensor &x, at::Tensor &out)
Definition: Functions.h:19201
at::Tensor & _standard_gamma_outf(const at::Tensor &self, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:23149
at::Tensor igamma(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10951
at::Tensor ge(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:10028
at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor &qtensor, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:2822
at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:14851
at::Tensor & tanh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:7719
at::Tensor margin_ranking_loss(const at::Tensor &input1, const at::Tensor &input2, const at::Tensor &target, double margin=0.0, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:4321
inline ::std::tuple< at::Tensor &, at::Tensor & > _cudnn_ctc_loss_outf(const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:19902
at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &indices)
Definition: Functions.h:13403
at::Tensor special_erfc(const at::Tensor &self)
Definition: Functions.h:17095
at::Tensor & special_multigammaln_out(at::Tensor &out, const at::Tensor &self, int64_t p)
Definition: Functions.h:17469
at::Tensor sinc(const at::Tensor &self)
Definition: Functions.h:6978
at::Tensor & float_power_outf(const at::Tensor &self, const at::Tensor &exponent, at::Tensor &out)
Definition: Functions.h:11390
at::Tensor & replication_pad3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14367
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor,::std::vector< at::Tensor > > miopen_rnn_backward(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, const at::Tensor &output, const c10::optional< at::Tensor > &grad_output, const c10::optional< at::Tensor > &grad_hy, const c10::optional< at::Tensor > &grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, const at::Tensor &reserve, ::std::array< bool, 4 > output_mask)
Definition: Functions.h:4814
at::Tensor & mm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mat2)
Definition: Functions.h:4824
at::Tensor & _pdist_backward_out(at::Tensor &out, const at::Tensor &grad, const at::Tensor &self, double p, const at::Tensor &pdist)
Definition: Functions.h:22195
at::Tensor & as_strided_copy_symint_out(at::Tensor &out, const at::Tensor &self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional< c10::SymInt > storage_offset=c10::nullopt)
Definition: Functions.h:25469
at::Tensor & random_out(at::Tensor &out, const at::Tensor &self, int64_t from, c10::optional< int64_t > to, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:24138
at::Tensor to_mkldnn_backward(const at::Tensor &grad, const at::Tensor &input)
Definition: Functions.h:8980
at::Tensor & to_sparse_csc_out(at::Tensor &out, const at::Tensor &self, c10::optional< int64_t > dense_dim=c10::nullopt)
Definition: Functions.h:23566
at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19707
at::Tensor & blackman_window_out(at::Tensor &out, int64_t window_length)
Definition: Functions.h:20188
at::Tensor swapdims(const at::Tensor &self, int64_t dim0, int64_t dim1)
Definition: Functions.h:10509
at::Tensor masked_select_backward(const at::Tensor &grad, const at::Tensor &input, const at::Tensor &mask)
Definition: Functions.h:10335
at::Tensor divide(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2316
at::Tensor & _upsample_nearest_exact3d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16281
at::Tensor & not_equal_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:9967
at::Tensor logaddexp(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4115
at::Tensor special_bessel_j1(const at::Tensor &self)
Definition: Functions.h:19230
at::Tensor & masked_fill_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mask, const at::Scalar &value)
Definition: Functions.h:23985
const at::Tensor & fft_ihfftn_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional< c10::string_view > norm, const at::Tensor &out)
Definition: Functions.h:17730
at::Tensor & q_per_channel_scales_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:23687
at::Tensor & erfinv_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:10715
at::Tensor & linalg_cholesky_out(at::Tensor &out, const at::Tensor &self, bool upper=false)
Definition: Functions.h:17800
void unsafe_split_symint_outf(const at::Tensor &self, c10::SymInt split_size, int64_t dim, at::TensorList out)
Definition: Functions.h:22803
at::Tensor & new_ones_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:21027
at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor &indices, at::Tensor &grad_input)
Definition: Functions.h:13519
at::Tensor renorm(const at::Tensor &self, const at::Scalar &p, int64_t dim, const at::Scalar &maxnorm)
Definition: Functions.h:11312
at::Tensor & log_softmax_out(at::Tensor &out, const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:4209
at::Tensor special_i1(const at::Tensor &self)
Definition: Functions.h:17305
at::Tensor & _standard_gamma_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:23145
at::Tensor & fft_fftn_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17660
void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, at::TensorList weights)
Definition: Functions.h:24966
at::Tensor & copysign_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:1177
at::Tensor poisson_nll_loss(const at::Tensor &input, const at::Tensor &target, bool log_input, bool full, double eps, int64_t reduction)
Definition: Functions.h:5386
at::Tensor & bitwise_not_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:1164
at::Tensor & fft_ifft2_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17586
inline ::std::tuple< at::Tensor &, at::Tensor & > adaptive_max_pool3d_outf(const at::Tensor &self, at::IntArrayRef output_size, at::Tensor &out, at::Tensor &indices)
Definition: Functions.h:13421
at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor &out)
Definition: Functions.h:21960
void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:24421
at::Tensor fmin(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11022
at::Tensor _index_put_impl(const at::Tensor &self, const c10::List< c10::optional< at::Tensor > > &indices, const at::Tensor &values, bool accumulate=false, bool unsafe=false)
Definition: Functions.h:21508
at::Tensor & all_out(at::Tensor &out, const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:647
at::Tensor neg(const at::Tensor &self)
Definition: Functions.h:6450
at::Tensor t(const at::Tensor &self)
Definition: Functions.h:7681
void _foreach_clamp_max_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11633
at::Tensor & special_chebyshev_polynomial_v_outf(const at::Tensor &x, const at::Tensor &n, at::Tensor &out)
Definition: Functions.h:19375
at::Tensor slice_symint(const at::Tensor &self, int64_t dim=0, c10::optional< c10::SymInt > start=c10::nullopt, c10::optional< c10::SymInt > end=c10::nullopt, c10::SymInt step=1)
Definition: Functions.h:7047
at::Tensor celu(const at::Tensor &self, const at::Scalar &alpha=1.0)
Definition: Functions.h:6854
at::Tensor div(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2278
at::Tensor & _cholesky_solve_helper_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &A, bool upper)
Definition: Functions.h:24277
inline ::std::tuple< at::Tensor, at::Tensor > batch_norm_update_stats(const at::Tensor &input, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum)
Definition: Functions.h:5138
at::Tensor & acosh_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:795
at::Tensor & avg_pool2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override, at::Tensor &grad_input)
Definition: Functions.h:13463
at::Tensor & _histogramdd_from_bin_cts_outf(const at::Tensor &self, at::IntArrayRef bins, c10::optional< at::ArrayRef< double > > range, const c10::optional< at::Tensor > &weight, bool density, at::Tensor &out)
Definition: Functions.h:24308
inline ::std::tuple< at::Tensor &, at::Tensor & > _weight_norm_interface_backward_outf(const at::Tensor &grad_w, const at::Tensor &saved_v, const at::Tensor &saved_g, const at::Tensor &saved_norms, int64_t dim, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23104
at::Tensor & arctan2_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:10795
at::Tensor & embedding_renorm_outf(const at::Tensor &self, const at::Tensor &indices, double max_norm, double norm_type, at::Tensor &out)
Definition: Functions.h:20761
at::Tensor expand_copy(const at::Tensor &self, at::IntArrayRef size, bool implicit=false)
Definition: Functions.h:18737
at::Tensor eye(int64_t n, at::TensorOptions options={})
Definition: Functions.h:3037
at::Tensor & baddbmm_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:1050
void _foreach_addcmul_(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar &value=1)
Definition: Functions.h:12108
at::Tensor trace(const at::Tensor &self)
Definition: Functions.h:9908
at::Tensor _is_any_true(const at::Tensor &self)
Definition: Functions.h:632
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor > miopen_rnn(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state)
Definition: Functions.h:4809
inline ::std::tuple< at::Tensor &, at::Tensor & > nll_loss2d_forward_symint_out(at::Tensor &output, at::Tensor &total_weight, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight, int64_t reduction, c10::SymInt ignore_index)
Definition: Functions.h:12665
at::Tensor slice(const at::Tensor &self, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:7036
at::Tensor & acos_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:493
at::Tensor & linalg_matrix_power_out(at::Tensor &out, const at::Tensor &self, int64_t n)
Definition: Functions.h:18501
inline ::std::vector< at::Tensor > unsafe_chunk(const at::Tensor &self, int64_t chunks, int64_t dim=0)
Definition: Functions.h:1443
at::Tensor embedding_backward(const at::Tensor &grad, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse)
Definition: Functions.h:2428
at::Tensor & inner_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:18179
void _cufft_clear_plan_cache(int64_t device_index)
Definition: Functions.h:3617
at::Tensor matrix_exp(const at::Tensor &self)
Definition: Functions.h:4359
at::Tensor & embedding_outf(const at::Tensor &weight, const at::Tensor &indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor &out)
Definition: Functions.h:20680
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_slogdet_out(at::Tensor &sign, at::Tensor &logabsdet, const at::Tensor &A)
Definition: Functions.h:18020
at::Tensor nanquantile(const at::Tensor &self, const at::Tensor &q, c10::optional< int64_t > dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear")
Definition: Functions.h:11148
at::Tensor quantized_batch_norm(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, const at::Tensor &mean, const at::Tensor &var, double eps, double output_scale, int64_t output_zero_point)
Definition: Functions.h:1082
at::Tensor & convolution_overrideable_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor &out)
Definition: Functions.h:20351
const at::Tensor & fft_ihfft2_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional< c10::string_view > norm, const at::Tensor &out)
Definition: Functions.h:17646
at::Tensor _cudnn_rnn_flatten_weight_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional)
Definition: Functions.h:194
at::Tensor & alias_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25895
at::Tensor & fill_outf(const at::Tensor &self, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:21235
at::Tensor _copy_from_and_resize(const at::Tensor &self, const at::Tensor &dst)
Definition: Functions.h:1882
at::Tensor logcumsumexp(const at::Tensor &self, int64_t dim)
Definition: Functions.h:4265
at::Tensor & flip_outf(const at::Tensor &self, at::IntArrayRef dims, at::Tensor &out)
Definition: Functions.h:22898
inline ::std::vector< at::Tensor > split_copy_symint(const at::Tensor &self, c10::SymInt split_size, int64_t dim=0)
Definition: Functions.h:18846
at::Tensor multiply(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4895
inline ::std::vector< at::Tensor > _foreach_trunc(at::TensorList self)
Definition: Functions.h:12093
at::Tensor upsample_trilinear3d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14576
at::Tensor & adaptive_avg_pool3d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13287
at::Tensor & upsample_trilinear3d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales_d=c10::nullopt, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15511
at::Tensor & masked_fill_outf(const at::Tensor &self, const at::Tensor &mask, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:23989
at::Tensor & exp2_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:2999
at::Tensor & hardshrink_outf(const at::Tensor &self, const at::Scalar &lambd, at::Tensor &out)
Definition: Functions.h:6730
at::Tensor blackman_window(int64_t window_length, at::TensorOptions options={})
Definition: Functions.h:1257
at::Tensor & _log_softmax_backward_data_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, at::ScalarType input_dtype)
Definition: Functions.h:4242
at::Tensor & elu_(at::Tensor &self, const at::Scalar &alpha=1, const at::Scalar &scale=1, const at::Scalar &input_scale=1)
Definition: Functions.h:12887
at::Tensor & frac_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:3171
at::Tensor & logspace_out(at::Tensor &out, const at::Scalar &start, const at::Scalar &end, int64_t steps, double base=10.0)
Definition: Functions.h:4195
at::Tensor & threshold_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Scalar &threshold)
Definition: Functions.h:7757
at::Tensor & replication_pad1d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14114
at::Tensor & triu_outf(const at::Tensor &self, int64_t diagonal, at::Tensor &out)
Definition: Functions.h:9866
at::Tensor & diff_out(at::Tensor &out, const at::Tensor &self, int64_t n=1, int64_t dim=-1, const c10::optional< at::Tensor > &prepend={}, const c10::optional< at::Tensor > &append={})
Definition: Functions.h:2234
at::Tensor & alias_copy_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25891
at::Tensor & logaddexp_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:4106
at::Tensor _sparse_sum_backward(const at::Tensor &grad, const at::Tensor &self, at::IntArrayRef dim)
Definition: Functions.h:8311
at::Tensor & bernoulli_outf(const at::Tensor &self, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:1106
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > mkldnn_linear_backward(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:3948
at::Tensor & quantized_max_pool2d_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out)
Definition: Functions.h:21740
inline ::std::tuple< at::Tensor &, at::Tensor & > mkldnn_linear_backward_weights_outf(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, bool bias_defined, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:21641
at::Tensor & upsample_nearest1d_backward_symint_out(at::Tensor &grad_input, const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:15709
at::Tensor & addcmul_outf(const at::Tensor &self, const at::Tensor &tensor1, const at::Tensor &tensor2, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:10406
at::Tensor sparse_csr_tensor(const at::Tensor &crow_indices, const at::Tensor &col_indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options)
Definition: Functions.h:8646
at::Tensor & not_equal_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9963
at::Tensor & complex_out(at::Tensor &out, const at::Tensor &real, const at::Tensor &imag)
Definition: Functions.h:1664
at::Tensor & randperm_outf(int64_t n, at::Tensor &out)
Definition: Functions.h:6376
at::Tensor glu_backward_jvp(const at::Tensor &grad_x, const at::Tensor &grad_glu, const at::Tensor &x, const at::Tensor &dgrad_glu, const at::Tensor &dx, int64_t dim)
Definition: Functions.h:12925
at::Tensor & mm_outf(const at::Tensor &self, const at::Tensor &mat2, at::Tensor &out)
Definition: Functions.h:4828
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > batch_norm_backward_reduce_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &weight, bool input_g, bool weight_g, bool bias_g)
Definition: Functions.h:22070
at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor &out, const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19287
inline ::std::tuple< at::Tensor, at::Tensor > sort(const at::Tensor &self, int64_t dim=-1, bool descending=false)
Definition: Functions.h:11194
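Example (editor's sketch): sort returns a (values, indices) tuple, which C++17 structured bindings unpack directly.

    #include <ATen/ATen.h>

    void sort_example() {
      at::Tensor x = at::tensor({3.0, 1.0, 2.0});
      auto [values, indices] = at::sort(x, /*dim=*/-1, /*descending=*/false);
      // values  -> {1.0, 2.0, 3.0}
      // indices -> {1, 2, 0}
    }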
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _cudnn_rnn_symint_outf(const at::Tensor &input, at::TensorList weight, int64_t weight_stride0, const c10::optional< at::Tensor > &weight_buf, const at::Tensor &hx, const c10::optional< at::Tensor > &cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional< at::Tensor > &dropout_state, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4)
Definition: Functions.h:19984
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21446
at::Tensor ones_like(const at::Tensor &self, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:5272
void _cummin_helper(const at::Tensor &self, at::Tensor &values, at::Tensor &indices, int64_t dim)
Definition: Functions.h:2071
inline ::std::tuple< at::Tensor &, at::Tensor & > max_outf(const at::Tensor &self, int64_t dim, bool keepdim, at::Tensor &max, at::Tensor &max_values)
Definition: Functions.h:4416
at::Tensor tan(const at::Tensor &self)
Definition: Functions.h:7686
at::Tensor & upsample_nearest2d_backward_outf(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:15962
at::Tensor & hspmm_out(at::Tensor &out, const at::Tensor &mat1, const at::Tensor &mat2)
Definition: Functions.h:8941
at::Tensor & upsample_linear1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:14829
at::Tensor & ones_like_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:22150
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linear_backward_outf(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, ::std::array< bool, 3 > output_mask, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21614
at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14620
at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:14862
at::Tensor & threshold_outf(const at::Tensor &self, const at::Scalar &threshold, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:7752
at::Tensor __lshift__(const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9734
at::Tensor & poisson_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:23172
bool _use_cudnn_rnn_flatten_weight()
Definition: Functions.h:178
void _foreach_sqrt_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24664
at::Tensor & special_logit_outf(const at::Tensor &self, c10::optional< double > eps, at::Tensor &out)
Definition: Functions.h:17342
inline ::std::tuple< at::Tensor &, at::Tensor & > _native_multi_head_attention_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional< int64_t > mask_type=c10::nullopt)
Definition: Functions.h:25953
at::Tensor & q_per_channel_zero_points_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23692
inline ::std::vector< at::Tensor > _foreach_log(at::TensorList self)
Definition: Functions.h:11953
at::Tensor & lift_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:23967
at::Tensor & new_ones_symint_outf(const at::Tensor &self, c10::SymIntArrayRef size, at::Tensor &out)
Definition: Functions.h:21060
at::Tensor & linalg_matrix_norm_outf(const at::Tensor &self, const at::Scalar &ord, at::IntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:18267
at::Tensor logit_backward(const at::Tensor &grad_output, const at::Tensor &self, c10::optional< double > eps=c10::nullopt)
Definition: Functions.h:16370
at::Tensor & nuclear_norm_out(at::Tensor &out, const at::Tensor &self, bool keepdim=false)
Definition: Functions.h:8470
at::Tensor & round_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:6615
at::Tensor fft_hfftn(const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17707
at::Tensor & to_sparse_bsc_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef blocksize, c10::optional< int64_t > dense_dim=c10::nullopt)
Definition: Functions.h:23584
at::Tensor view_as_real_copy(const at::Tensor &self)
Definition: Functions.h:18685
at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor &scales, const at::Tensor &zero_points, int64_t axis, at::TensorOptions options={}, c10::optional< at::MemoryFormat > memory_format=MemoryFormat::Contiguous)
Definition: Functions.h:2794
at::Tensor & amin_outf(const at::Tensor &self, at::IntArrayRef dim, bool keepdim, at::Tensor &out)
Definition: Functions.h:4676
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _embedding_bag_forward_only_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional< at::Tensor > &per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1)
Definition: Functions.h:20771
at::Tensor & embedding_dense_backward_symint_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq)
Definition: Functions.h:20735
at::Tensor resize(const at::Tensor &self, at::IntArrayRef size, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:21133
at::Tensor ceil(const at::Tensor &self)
Definition: Functions.h:1410
at::Tensor & __lshift___outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:24088
at::Tensor & special_spherical_bessel_j0_out(at::Tensor &out, const at::Tensor &x)
Definition: Functions.h:19865
bool __dispatch_is_inference(const at::Tensor &self)
Definition: Functions.h:3777
void _foreach_log10_(at::TensorList self)
Definition: Functions.h:11968
at::Tensor l1_loss(const at::Tensor &self, const at::Tensor &target, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:12282
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > linalg_svd_out(at::Tensor &U, at::Tensor &S, at::Tensor &Vh, const at::Tensor &A, bool full_matrices=true, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:18305
at::Tensor reflection_pad3d_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:14015
at::Tensor & norm_outf(const at::Tensor &self, const c10::optional< at::Scalar > &p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor &out)
Definition: Functions.h:8395
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _linalg_det_out(at::Tensor &result, at::Tensor &LU, at::Tensor &pivots, const at::Tensor &A)
Definition: Functions.h:17884
at::Tensor upsample_nearest1d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14642
at::Tensor special_log_ndtr(const at::Tensor &self)
Definition: Functions.h:16997
at::Tensor & multilabel_margin_loss_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &target, int64_t reduction, const at::Tensor &is_target)
Definition: Functions.h:12343
at::Tensor & mkldnn_convolution_outf(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor &out)
Definition: Functions.h:21792
at::Tensor upsample_bilinear2d_backward(const at::Tensor &grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:15005
at::Tensor & _sparse_coo_tensor_with_dims_out(at::Tensor &out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size)
Definition: Functions.h:23403
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > unique_consecutive(const at::Tensor &self, bool return_inverse=false, bool return_counts=false, c10::optional< int64_t > dim=c10::nullopt)
Definition: Functions.h:7944
at::Tensor & fft_ifftn_outf(const at::Tensor &self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17674
at::Tensor stft(const at::Tensor &self, int64_t n_fft, c10::optional< int64_t > hop_length, c10::optional< int64_t > win_length, const c10::optional< at::Tensor > &window, bool normalized, c10::optional< bool > onesided=c10::nullopt, c10::optional< bool > return_complex=c10::nullopt)
Definition: Functions.h:7447
at::Tensor _sparse_broadcast_to_copy(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:18727
at::Tensor chain_matmul(at::TensorList matrices)
Definition: Functions.h:1429
inline ::std::tuple< at::Tensor &, at::Tensor & > batch_norm_gather_stats_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum, double eps, int64_t count)
Definition: Functions.h:22043
at::Tensor & logical_and_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:1238
at::Tensor softplus(const at::Tensor &self, const at::Scalar &beta=1, const at::Scalar &threshold=20)
Definition: Functions.h:13128
at::Tensor combinations(const at::Tensor &self, int64_t r=2, bool with_replacement=false)
Definition: Functions.h:9169
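Example (editor's sketch): combinations enumerates the r-length combinations of a 1-D tensor, one combination per row.

    #include <ATen/ATen.h>

    void combinations_example() {
      at::Tensor x = at::tensor({1, 2, 3});
      // Rows of the resulting {3, 2} tensor: (1,2), (1,3), (2,3).
      at::Tensor pairs = at::combinations(x, /*r=*/2);
    }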
at::Tensor cosine_similarity(const at::Tensor &x1, const at::Tensor &x2, int64_t dim=1, double eps=1e-08)
Definition: Functions.h:5321
at::Tensor _nested_select_backward(const at::Tensor &grad_output, const at::Tensor &self, int64_t dim, int64_t index)
Definition: Functions.h:6822
at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8145
at::Tensor & _test_autograd_multiple_dispatch_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:25348
at::Tensor _resize_output(const at::Tensor &self, at::IntArrayRef size, at::Device device)
Definition: Functions.h:21164
inline ::std::vector< at::Tensor > _foreach_log2(at::TensorList self)
Definition: Functions.h:11983
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > linalg_lstsq(const at::Tensor &self, const at::Tensor &b, c10::optional< double > rcond=c10::nullopt, c10::optional< c10::string_view > driver=c10::nullopt)
Definition: Functions.h:17954
at::Tensor & chain_matmul_out(at::Tensor &out, at::TensorList matrices)
Definition: Functions.h:1434
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > _embedding_bag(const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional< at::Tensor > &per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1)
Definition: Functions.h:2516
at::Tensor & rsqrt_(at::Tensor &self)
Definition: Functions.h:6759
at::Tensor _mps_convolution(const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups)
Definition: Functions.h:4681
at::Tensor lift_fresh_copy(const at::Tensor &self)
Definition: Functions.h:9404
at::Tensor & tanh_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:7715
at::Tensor & i0_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:10730
inline ::std::tuple< at::Tensor, at::Tensor > adaptive_max_pool3d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13426
at::Tensor amax(const at::Tensor &self, at::IntArrayRef dim={}, bool keepdim=false)
Definition: Functions.h:4457
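Example (editor's sketch): amax reduces over the listed dimensions, keeping only the maxima; the sample values are assumptions.

    #include <ATen/ATen.h>

    void amax_example() {
      at::Tensor x = at::tensor({1.0, 5.0, 3.0, 2.0, 4.0, 0.0}).reshape({2, 3});
      at::Tensor rows = at::amax(x, /*dim=*/{1});  // per-row max: {5.0, 4.0}
    }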
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > unique_dim(const at::Tensor &self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false)
Definition: Functions.h:7939
at::Tensor outer(const at::Tensor &self, const at::Tensor &vec2)
Definition: Functions.h:18188
at::Tensor & threshold_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &threshold, const at::Scalar &value)
Definition: Functions.h:7748
at::Tensor & t_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25734
at::Tensor & floor_(at::Tensor &self)
Definition: Functions.h:3128
at::Tensor rrelu(const at::Tensor &self, const at::Scalar &lower=0.125, const at::Scalar &upper=0.3333333333333333, bool training=false, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:6643
inline ::std::tuple< at::Tensor &, at::Tensor & > cummax_outf(const at::Tensor &self, int64_t dim, at::Tensor &values, at::Tensor &indices)
Definition: Functions.h:2019
at::Tensor & softshrink_outf(const at::Tensor &self, const at::Scalar &lambd, at::Tensor &out)
Definition: Functions.h:13151
at::Tensor & hardtanh_(at::Tensor &self, const at::Scalar &min_val=-1, const at::Scalar &max_val=1)
Definition: Functions.h:12991
at::Tensor & replication_pad2d_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding, at::Tensor &grad_input)
Definition: Functions.h:14246
at::Tensor & channel_shuffle_outf(const at::Tensor &self, int64_t groups, at::Tensor &out)
Definition: Functions.h:22226
inline ::std::tuple< at::Tensor &, at::Tensor & > native_dropout_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &input, double p, c10::optional< bool > train)
Definition: Functions.h:20066
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > mps_convolution_backward(const at::Tensor &self, const at::Tensor &grad_output, const at::Tensor &weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:4686
at::Tensor bincount(const at::Tensor &self, const c10::optional< at::Tensor > &weights={}, int64_t minlength=0)
Definition: Functions.h:1154
at::Tensor & lerp_outf(const at::Tensor &self, const at::Tensor &end, const at::Scalar &weight, at::Tensor &out)
Definition: Functions.h:10804
void _foreach_neg_(at::TensorList self)
Definition: Functions.h:11998
at::Tensor & _slow_conv2d_forward_out(at::Tensor &output, const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, at::IntArrayRef padding)
Definition: Functions.h:16535
at::Tensor & mkldnn_reorder_conv3d_weight_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1)
Definition: Functions.h:23611
void _foreach_mul_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11603
at::Tensor & to_mkldnn_outf(const at::Tensor &self, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:23597
inline ::std::vector< at::Tensor > split_with_sizes_copy_symint(const at::Tensor &self, c10::SymIntArrayRef split_sizes, int64_t dim=0)
Definition: Functions.h:18868
void _foreach_abs_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24673
at::Tensor & _sample_dirichlet_out(at::Tensor &out, const at::Tensor &self, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:23163
at::Tensor ccol_indices_copy(const at::Tensor &self)
Definition: Functions.h:18939
void _foreach_atan_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24696
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > _flash_attention_backward(const at::Tensor &grad_out, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const at::Tensor &out, const at::Tensor &logsumexp, const at::Tensor &cum_seq_q, const at::Tensor &cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset)
Definition: Functions.h:19167
at::Tensor resize_as(const at::Tensor &self, const at::Tensor &the_template, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:23325
at::Tensor & expand_copy_outf(const at::Tensor &self, at::IntArrayRef size, bool implicit, at::Tensor &out)
Definition: Functions.h:25520
at::Tensor mm(const at::Tensor &self, const at::Tensor &mat2)
Definition: Functions.h:4819
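Example (editor's sketch): mm and its out variants follow the naming convention visible throughout this index: *_out takes the output tensor first, *_outf takes it last.

    #include <ATen/ATen.h>

    void mm_example() {
      at::Tensor a = at::randn({2, 3});
      at::Tensor b = at::randn({3, 4});
      at::Tensor c = at::mm(a, b);       // allocates a fresh {2, 4} result
      at::Tensor out = at::empty({2, 4});
      at::mm_out(out, a, b);             // writes into `out`
      at::mm_outf(a, b, out);            // same op, out parameter last
    }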
at::Tensor & upsample_nearest1d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales, at::Tensor &grad_input)
Definition: Functions.h:15720
at::Tensor special_modified_bessel_k1(const at::Tensor &self)
Definition: Functions.h:19650
inline ::std::tuple< at::Tensor &, at::Tensor & > adaptive_max_pool3d_out(at::Tensor &out, at::Tensor &indices, const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13417
at::Tensor _sparse_csr_sum(const at::Tensor &self, at::IntArrayRef dim, bool keepdim=false, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:8316
inline ::std::tuple< at::Tensor &, at::Tensor & > _weight_norm_interface_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &v, const at::Tensor &g, int64_t dim=0)
Definition: Functions.h:23091
void _foreach_ceil_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24705
at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor &indices, at::Tensor &grad_input)
Definition: Functions.h:13575
at::Tensor & _log_softmax_backward_data_outf(const at::Tensor &grad_output, const at::Tensor &output, int64_t dim, at::ScalarType input_dtype, at::Tensor &out)
Definition: Functions.h:4246
at::Tensor & sin_(at::Tensor &self)
Definition: Functions.h:6964
at::Tensor & fix_(at::Tensor &self)
Definition: Functions.h:7915
at::Tensor & cumprod_out(at::Tensor &out, const at::Tensor &self, int64_t dim, c10::optional< at::ScalarType > dtype=c10::nullopt)
Definition: Functions.h:2086
at::Tensor view_copy(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:19051
at::Tensor scatter_add(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src)
Definition: Functions.h:9572
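Example (editor's sketch): scatter_add accumulates src into a copy of self at the positions named by index along dim; duplicate indices sum.

    #include <ATen/ATen.h>

    void scatter_add_example() {
      at::Tensor base = at::zeros({3});
      at::Tensor index = at::tensor({0, 1, 0}, at::kLong);
      at::Tensor src = at::tensor({1.0, 2.0, 3.0});
      // Slot 0 receives 1.0 + 3.0, slot 1 receives 2.0:
      at::Tensor r = at::scatter_add(base, /*dim=*/0, index, src);  // {4.0, 2.0, 0.0}
    }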
void _foreach_frac_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24867
at::Tensor & cauchy_outf(const at::Tensor &self, double median, double sigma, c10::optional< at::Generator > generator, at::Tensor &out)
Definition: Functions.h:24198
at::Tensor & _masked_scale_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &mask, double scale)
Definition: Functions.h:20057
void _validate_sparse_bsr_tensor_args(const at::Tensor &crow_indices, const at::Tensor &col_indices, const at::Tensor &values, at::IntArrayRef size)
Definition: Functions.h:8863
inline ::std::tuple< at::Tensor &, at::Tensor & > batch_norm_gather_stats_with_counts_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &invstd, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, double momentum, double eps, const at::Tensor &counts)
Definition: Functions.h:22052
at::Tensor crow_indices_copy(const at::Tensor &self)
Definition: Functions.h:18929
at::Tensor rnn_relu_cell(const at::Tensor &input, const at::Tensor &hx, const at::Tensor &w_ih, const at::Tensor &w_hh, const c10::optional< at::Tensor > &b_ih={}, const c10::optional< at::Tensor > &b_hh={})
Definition: Functions.h:9309
at::Tensor & abs_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:363
at::Tensor _nested_select_backward_symint(const at::Tensor &grad_output, const at::Tensor &self, int64_t dim, c10::SymInt index)
Definition: Functions.h:6833
at::Tensor & col_indices_copy_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:25806
at::Tensor special_multigammaln(const at::Tensor &self, int64_t p)
Definition: Functions.h:17464
void _foreach_div_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11613
at::Tensor select_symint(const at::Tensor &self, int64_t dim, c10::SymInt index)
Definition: Functions.h:6789
at::Tensor & argsort_outf(const at::Tensor &self, bool stable, int64_t dim, bool descending, at::Tensor &out)
Definition: Functions.h:24335
at::Tensor & sinh_(at::Tensor &self)
Definition: Functions.h:7002
at::Tensor sparse_resize(const at::Tensor &self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)
Definition: Functions.h:23465
at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor &indices, const at::Tensor &values, at::IntArrayRef size, at::TensorOptions options={})
Definition: Functions.h:8799
inline ::std::tuple< at::Tensor &, at::Tensor & > var_mean_outf(const at::Tensor &self, at::OptionalIntArrayRef dim, c10::optional< int64_t > correction, bool keepdim, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23086
at::Tensor & linalg_cholesky_outf(const at::Tensor &self, bool upper, at::Tensor &out)
Definition: Functions.h:17804
at::Tensor & from_file_out(at::Tensor &out, c10::string_view filename, c10::optional< bool > shared=c10::nullopt, c10::optional< int64_t > size=0)
Definition: Functions.h:21267
at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor &indices)
Definition: Functions.h:13571
inline ::std::tuple< at::Tensor &, at::Tensor & > linalg_eigh_out(at::Tensor &eigvals, at::Tensor &eigvecs, const at::Tensor &self, c10::string_view UPLO="L")
Definition: Functions.h:18095
at::Tensor silu_backward(const at::Tensor &grad_output, const at::Tensor &self)
Definition: Functions.h:6892
at::Tensor special_i1e(const at::Tensor &self)
Definition: Functions.h:17319
at::Tensor special_spherical_bessel_j0(const at::Tensor &x)
Definition: Functions.h:19860
at::Tensor isin(const at::Tensor &elements, const at::Tensor &test_elements, bool assume_unique=false, bool invert=false)
Definition: Functions.h:3689
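Example (editor's sketch): isin tests each element of elements for membership in test_elements and returns a boolean mask.

    #include <ATen/ATen.h>

    void isin_example() {
      at::Tensor elements = at::tensor({1, 2, 3, 4});
      at::Tensor test = at::tensor({2, 4});
      at::Tensor mask = at::isin(elements, test);  // {false, true, false, true}
    }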
at::Tensor & eq_out(at::Tensor &out, const at::Tensor &self, const at::Scalar &other)
Definition: Functions.h:9991
inline ::std::tuple< at::Tensor &, at::Tensor & > _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(const at::Tensor &self, const at::Tensor &scale, const at::Tensor &zero_point, const at::Tensor &fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:23741
at::Tensor & row_stack_out(at::Tensor &out, at::TensorList tensors)
Definition: Functions.h:2497
at::Tensor gcd(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3304
at::Tensor value_selecting_reduction_backward_symint(const at::Tensor &grad, int64_t dim, const at::Tensor &indices, c10::SymIntArrayRef sizes, bool keepdim)
Definition: Functions.h:4446
at::Tensor addcmul(const at::Tensor &self, const at::Tensor &tensor1, const at::Tensor &tensor2, const at::Scalar &value=1)
Definition: Functions.h:10411
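Example (editor's sketch): addcmul fuses self + value * tensor1 * tensor2 into a single call; the operand values are assumptions.

    #include <ATen/ATen.h>

    void addcmul_example() {
      at::Tensor self = at::ones({3});
      at::Tensor t1 = at::tensor({1.0, 2.0, 3.0});
      at::Tensor t2 = at::tensor({4.0, 5.0, 6.0});
      at::Tensor r = at::addcmul(self, t1, t2, /*value=*/0.5);  // {3.0, 6.0, 10.0}
    }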
at::Tensor & linalg_norm_outf(const at::Tensor &self, const c10::optional< at::Scalar > &ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional< at::ScalarType > dtype, at::Tensor &out)
Definition: Functions.h:18230
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _thnn_fused_gru_cell_backward_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, at::Tensor &out3, at::Tensor &out4, const at::Tensor &grad_hy, const at::Tensor &workspace, bool has_bias)
Definition: Functions.h:23841
at::Tensor & im2col_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:16911
at::Tensor slow_conv_dilated2d_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias={}, at::IntArrayRef stride=1, c10::SymIntArrayRef padding=c10::SymInt(0), at::IntArrayRef dilation=1)
Definition: Functions.h:16794
at::Tensor sspaddmm(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta=1, const at::Scalar &alpha=1)
Definition: Functions.h:7363
at::Tensor & clamp_max_outf(const at::Tensor &self, const at::Scalar &max, at::Tensor &out)
Definition: Functions.h:1564
inline ::std::tuple< at::Tensor &, at::Tensor & > max_pool2d_with_indices_out(at::Tensor &out, at::Tensor &indices, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:13557
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _transform_bias_rescale_qkv_outf(const at::Tensor &qkv, const at::Tensor &qkv_bias, int64_t num_heads, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:22925
at::Tensor & arctan2_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:10791
at::Tensor _triton_scaled_dot_attention(const at::Tensor &q, const at::Tensor &k, const at::Tensor &v, double dropout_p=0.0)
Definition: Functions.h:19182
at::Tensor & _triton_multi_head_attention_outf(const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, int64_t embed_dim, int64_t num_head, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, const c10::optional< at::Tensor > &mask, at::Tensor &out)
Definition: Functions.h:25975
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > miopen_batch_norm_backward_outf(const at::Tensor &input, const at::Tensor &grad_output, const at::Tensor &weight, const c10::optional< at::Tensor > &running_mean, const c10::optional< at::Tensor > &running_var, const c10::optional< at::Tensor > &save_mean, const c10::optional< at::Tensor > &save_var, double epsilon, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21856
inline ::std::tuple< at::Tensor, at::Tensor > qr(const at::Tensor &self, bool some=true)
Definition: Functions.h:10570
at::Tensor & upsample_linear1d_outf(const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales, at::Tensor &out)
Definition: Functions.h:14774
inline ::std::tuple< at::Tensor &, at::Tensor & > _weight_norm_interface_backward_out(at::Tensor &out0, at::Tensor &out1, const at::Tensor &grad_w, const at::Tensor &saved_v, const at::Tensor &saved_g, const at::Tensor &saved_norms, int64_t dim)
Definition: Functions.h:23100
const at::Tensor & sparse_resize_out(const at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)
Definition: Functions.h:23456
at::Tensor add(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha=1)
Definition: Functions.h:536
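Example (editor's sketch): the trailing alpha scales other before the addition, so add(x, y, alpha) computes x + alpha * y.

    #include <ATen/ATen.h>

    void add_example() {
      at::Tensor x = at::ones({2});
      at::Tensor y = at::tensor({10.0, 20.0});
      at::Tensor r = at::add(x, y, /*alpha=*/2);  // {21.0, 41.0}
    }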
at::Tensor & isposinf_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:16940
at::Tensor & max_unpool2d_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &indices, at::IntArrayRef output_size)
Definition: Functions.h:13613
at::Tensor & fft_irfftn_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17698
at::Tensor narrow_copy(const at::Tensor &self, int64_t dim, int64_t start, int64_t length)
Definition: Functions.h:4942
at::Tensor arcsinh(const at::Tensor &self)
Definition: Functions.h:838
at::Tensor & reflection_pad1d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef padding)
Definition: Functions.h:13707
at::Tensor _compute_linear_combination(const at::Tensor &input, const at::Tensor &coefficients)
Definition: Functions.h:4393
at::Tensor & linalg_matmul_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:17973
at::Tensor grid_sampler_2d(const at::Tensor &input, const at::Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)
Definition: Functions.h:3338
at::Tensor & constant_pad_nd_outf(const at::Tensor &self, at::IntArrayRef pad, const at::Scalar &value, at::Tensor &out)
Definition: Functions.h:20226
void _foreach_cos_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24718
inline ::std::tuple< at::Tensor &, at::Tensor & > aminmax_outf(const at::Tensor &self, c10::optional< int64_t > dim, bool keepdim, at::Tensor &min, at::Tensor &max)
Definition: Functions.h:4388
at::Tensor & select_copy_out(at::Tensor &out, const at::Tensor &self, int64_t dim, int64_t index)
Definition: Functions.h:25606
at::Tensor fft_ihfft(const at::Tensor &self, c10::optional< int64_t > n=c10::nullopt, int64_t dim=-1, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17553
at::Tensor & nll_loss_symint_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100)
Definition: Functions.h:12379
at::Tensor & _nnpack_spatial_convolution_out(at::Tensor &out, const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride=1)
Definition: Functions.h:22097
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor, at::Tensor > embedding_bag(const at::Tensor &weight, const at::Tensor &indices, const at::Tensor &offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional< at::Tensor > &per_sample_weights={}, bool include_last_offset=false)
Definition: Functions.h:2506
at::Tensor & erfinv_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:10711
at::Tensor nan_to_num(const at::Tensor &self, c10::optional< double > nan=c10::nullopt, c10::optional< double > posinf=c10::nullopt, c10::optional< double > neginf=c10::nullopt)
Definition: Functions.h:3895
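Example (editor's sketch): nan_to_num replaces NaN and infinities; arguments left as c10::nullopt fall back to 0 for NaN and the dtype's extreme finite values for the infinities.

    #include <ATen/ATen.h>
    #include <limits>

    void nan_to_num_example() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      double inf = std::numeric_limits<double>::infinity();
      at::Tensor x = at::tensor({nan, inf, -inf, 1.0});
      at::Tensor r = at::nan_to_num(x, /*nan=*/0.0, /*posinf=*/100.0, /*neginf=*/-100.0);
      // r -> {0.0, 100.0, -100.0, 1.0}
    }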
at::Tensor & cos_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:1897
at::Tensor & expand_copy_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, bool implicit=false)
Definition: Functions.h:25509
at::Tensor permute(const at::Tensor &self, at::IntArrayRef dims)
Definition: Functions.h:5326
at::Tensor & neg_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:6464
void _foreach_reciprocal_(at::TensorList self)
Definition: Functions.h:12078
at::Tensor sinh(const at::Tensor &self)
Definition: Functions.h:6997
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_backward_symint_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &grad_out, const at::Tensor &input, const at::Tensor &mean, const at::Tensor &rstd, const c10::optional< at::Tensor > &weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:21468
at::Tensor & count_nonzero_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef dim)
Definition: Functions.h:20445
at::Tensor isreal(const at::Tensor &self)
Definition: Functions.h:3757
at::Tensor & fft_ihfft_outf(const at::Tensor &self, c10::optional< int64_t > n, int64_t dim, c10::optional< c10::string_view > norm, at::Tensor &out)
Definition: Functions.h:17562
at::Tensor kron(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:3787
at::Tensor & mkldnn_linear_backward_input_outf(at::IntArrayRef input_size, const at::Tensor &grad_output, const at::Tensor &weight, at::Tensor &out)
Definition: Functions.h:21632
at::Tensor & vdot_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:2392
at::Tensor & log_sigmoid_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &buffer)
Definition: Functions.h:13081
at::Tensor index_fill(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Scalar &value)
Definition: Functions.h:9486
at::Tensor _cholesky_solve_helper(const at::Tensor &self, const at::Tensor &A, bool upper)
Definition: Functions.h:10542
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor & > _linalg_slogdet_out(at::Tensor &sign, at::Tensor &logabsdet, at::Tensor &LU, at::Tensor &pivots, const at::Tensor &A)
Definition: Functions.h:18006
at::Tensor special_round(const at::Tensor &self, int64_t decimals=0)
Definition: Functions.h:17403
at::Tensor & smooth_l1_loss_outf(const at::Tensor &self, const at::Tensor &target, int64_t reduction, double beta, at::Tensor &out)
Definition: Functions.h:12779
at::Tensor & _compute_linear_combination_outf(const at::Tensor &input, const at::Tensor &coefficients, at::Tensor &out)
Definition: Functions.h:4402
at::Tensor & fill_(at::Tensor &self, const at::Scalar &value)
Definition: Functions.h:3113
at::Tensor cross_entropy_loss(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100, double label_smoothing=0.0)
Definition: Functions.h:10430
at::Tensor & greater_equal_outf(const at::Tensor &self, const at::Scalar &other, at::Tensor &out)
Definition: Functions.h:10051
at::Tensor special_bessel_y0(const at::Tensor &self)
Definition: Functions.h:19244
at::Tensor ctc_loss(const at::Tensor &log_probs, const at::Tensor &targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false)
Definition: Functions.h:2152
at::Tensor & _log_softmax_out(at::Tensor &out, const at::Tensor &self, int64_t dim, bool half_to_float)
Definition: Functions.h:4228
at::Tensor mv(const at::Tensor &self, const at::Tensor &vec)
Definition: Functions.h:4914
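Example (editor's sketch): mv is the matrix-vector product of an {n, m} matrix with a length-m vector.

    #include <ATen/ATen.h>

    void mv_example() {
      at::Tensor m = at::ones({2, 3});
      at::Tensor v = at::tensor({1.0, 2.0, 3.0});
      at::Tensor r = at::mv(m, v);  // {6.0, 6.0}
    }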
at::Tensor _unsafe_view(const at::Tensor &self, at::IntArrayRef size)
Definition: Functions.h:7959
void _foreach_floor_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24759
at::Tensor & avg_pool3d_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional< int64_t > divisor_override)
Definition: Functions.h:13487
at::Tensor & sparse_coo_tensor_outf(at::IntArrayRef size, at::Tensor &out)
Definition: Functions.h:23398
at::Tensor & embedding_dense_backward_outf(const at::Tensor &grad_output, const at::Tensor &indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor &out)
Definition: Functions.h:20724
at::Tensor & empty_quantized_outf(at::IntArrayRef size, const at::Tensor &qtensor, c10::optional< at::MemoryFormat > memory_format, at::Tensor &out)
Definition: Functions.h:21173
at::Tensor & linalg_matrix_rank_outf(const at::Tensor &input, const c10::optional< at::Tensor > &atol, const c10::optional< at::Tensor > &rtol, bool hermitian, at::Tensor &out)
Definition: Functions.h:18519
at::Tensor & rrelu_with_noise_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &self, const at::Tensor &noise, const at::Scalar &lower, const at::Scalar &upper, bool training, bool self_is_result)
Definition: Functions.h:25029
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > linalg_lu(const at::Tensor &A, bool pivot=true)
Definition: Functions.h:17851
inline ::std::vector< at::Tensor > _foreach_abs(at::TensorList self)
Definition: Functions.h:11843
inline ::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > _thnn_fused_lstm_cell_out(at::Tensor &out0, at::Tensor &out1, at::Tensor &out2, const at::Tensor &input_gates, const at::Tensor &hidden_gates, const at::Tensor &cx, const c10::optional< at::Tensor > &input_bias={}, const c10::optional< at::Tensor > &hidden_bias={})
Definition: Functions.h:23814
at::Tensor & addcmul_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &tensor1, const at::Tensor &tensor2, const at::Scalar &value=1)
Definition: Functions.h:10402
at::Tensor & set_symint_out(at::Tensor &out, const at::Tensor &self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={})
Definition: Functions.h:23895
at::Tensor & scatter_out(at::Tensor &out, const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src)
Definition: Functions.h:9511
at::Tensor & tanh_backward_out(at::Tensor &grad_input, const at::Tensor &grad_output, const at::Tensor &output)
Definition: Functions.h:16375
inline ::std::tuple< at::Tensor, at::Tensor > _sparse_mm_reduce_impl_backward(const at::Tensor &self, const at::Tensor &grad_out, const at::Tensor &weight, c10::string_view reduce, const at::Tensor &arg_out, ::std::array< bool, 2 > output_mask)
Definition: Functions.h:8604
at::Tensor bitwise_left_shift(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:9744
at::Tensor & fft_ifftn_out(at::Tensor &out, const at::Tensor &self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional< c10::string_view > norm=c10::nullopt)
Definition: Functions.h:17670
at::Tensor _transformer_encoder_layer_fwd(const at::Tensor &src, int64_t embed_dim, int64_t num_heads, const at::Tensor &qkv_weight, const at::Tensor &qkv_bias, const at::Tensor &proj_weight, const at::Tensor &proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor &norm_weight_1, const at::Tensor &norm_bias_1, const at::Tensor &norm_weight_2, const at::Tensor &norm_bias_2, const at::Tensor &ffn_weight_1, const at::Tensor &ffn_bias_1, const at::Tensor &ffn_weight_2, const at::Tensor &ffn_bias_2, const c10::optional< at::Tensor > &mask={}, c10::optional< int64_t > mask_type=c10::nullopt)
Definition: Functions.h:19107
at::Tensor upsample_linear1d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:14884
inline ::std::tuple< at::Tensor, at::Tensor > rnn_relu(const at::Tensor &input, const at::Tensor &hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first)
Definition: Functions.h:9284
at::Tensor & cholesky_solve_outf(const at::Tensor &self, const at::Tensor &input2, bool upper, at::Tensor &out)
Definition: Functions.h:10532
at::Tensor slice_scatter(const at::Tensor &self, const at::Tensor &src, int64_t dim=0, c10::optional< int64_t > start=c10::nullopt, c10::optional< int64_t > end=c10::nullopt, int64_t step=1)
Definition: Functions.h:7080
at::Tensor special_chebyshev_polynomial_t(const at::Tensor &x, const at::Tensor &n)
Definition: Functions.h:19272
at::Tensor & _mkldnn_transpose_outf(const at::Tensor &self, int64_t dim0, int64_t dim1, at::Tensor &out)
Definition: Functions.h:22889
at::Tensor special_scaled_modified_bessel_k1(const at::Tensor &x)
Definition: Functions.h:19678
at::Tensor & miopen_convolution_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
Definition: Functions.h:21861
at::Tensor binary_cross_entropy_with_logits(const at::Tensor &self, const at::Tensor &target, const c10::optional< at::Tensor > &weight={}, const c10::optional< at::Tensor > &pos_weight={}, int64_t reduction=at::Reduction::Mean)
Definition: Functions.h:1149
const at::Tensor & resize_as_(const at::Tensor &self, const at::Tensor &the_template, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:8503
at::Tensor upsample_nearest3d_symint(const at::Tensor &input, at::OptionalSymIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14730
at::Tensor & _nnpack_spatial_convolution_symint_outf(const at::Tensor &input, const at::Tensor &weight, const c10::optional< at::Tensor > &bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:22130
void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights)
Definition: Functions.h:12173
inline ::std::tuple< at::Tensor, at::Tensor > fake_quantize_per_tensor_affine_cachemask(const at::Tensor &self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:9070
inline ::std::tuple< at::Tensor, at::Tensor, at::Tensor > convolution_backward(const at::Tensor &grad_output, const at::Tensor &input, const at::Tensor &weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array< bool, 3 > output_mask)
Definition: Functions.h:1731
at::Tensor scatter(const at::Tensor &self, int64_t dim, const at::Tensor &index, const at::Tensor &src)
Definition: Functions.h:9506
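A 1-D scatter sketch (my example; note that `src` must match the dtype of `self`, and `index` must be a Long tensor):

#include <ATen/ATen.h>

int main() {
  at::Tensor self  = at::zeros({4}, at::kDouble);
  at::Tensor index = at::tensor({0, 2}, at::kLong);
  at::Tensor src   = at::tensor({1.0, 2.0});  // kDouble by default in C++
  // out[index[i]] = src[i] along dim 0  ->  {1, 0, 2, 0}
  at::Tensor out = at::scatter(self, /*dim=*/0, index, src);
  return 0;
}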
at::Tensor & logcumsumexp_out(at::Tensor &out, const at::Tensor &self, int64_t dim)
Definition: Functions.h:4270
at::Tensor cov(const at::Tensor &self, int64_t correction=1, const c10::optional< at::Tensor > &fweights={}, const c10::optional< at::Tensor > &aweights={})
Definition: Functions.h:1940
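A covariance sketch (my example, following the torch.cov convention that rows are variables and columns are observations):

#include <ATen/ATen.h>

int main() {
  at::Tensor obs = at::randn({3, 10});  // 3 variables, 10 observations each
  // 3x3 covariance matrix; correction=1 applies Bessel's correction.
  at::Tensor c = at::cov(obs);
  return 0;
}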
at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_h, c10::optional< double > scales_w, at::Tensor &grad_input)
Definition: Functions.h:16028
at::Tensor & as_strided_copy_outf(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional< int64_t > storage_offset, at::Tensor &out)
Definition: Functions.h:25458
at::Tensor & mkldnn_max_pool2d_backward_out(at::Tensor &out, const at::Tensor &grad_output, const at::Tensor &output, const at::Tensor &input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:21700
at::Tensor & cat_outf(const at::ITensorListRef &tensors, int64_t dim, at::Tensor &out)
Definition: Functions.h:1330
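A sketch of the `_outf` calling convention (my example): the `_outf` variants take `out` last, mirroring the operator schema, while the `_out` variants take it first; both write into `out`.

#include <ATen/ATen.h>

int main() {
  at::Tensor a   = at::ones({2, 3});
  at::Tensor b   = at::zeros({2, 3});
  at::Tensor out = at::empty({4, 3});
  at::cat_outf({a, b}, /*dim=*/0, out);  // same result as at::cat_out(out, {a, b}, 0)
  return 0;
}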
at::Tensor & normal_out(at::Tensor &out, const at::Tensor &mean, double std=1, c10::optional< at::Generator > generator=c10::nullopt)
Definition: Functions.h:11433
at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={})
Definition: Functions.h:2906
void _foreach_exp_out(at::TensorList out, at::TensorList self)
Definition: Functions.h:24637
at::Tensor & special_erfcx_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17114
at::Tensor & to_sparse_csr_outf(const at::Tensor &self, c10::optional< int64_t > dense_dim, at::Tensor &out)
Definition: Functions.h:23561
at::Tensor & erfc_(at::Tensor &self)
Definition: Functions.h:2952
at::Tensor & fmax_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:11046
void _foreach_expm1_outf(at::TensorList self, at::TensorList out)
Definition: Functions.h:24754
void _foreach_add_(at::TensorList self, const at::Scalar &scalar)
Definition: Functions.h:11583
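A sketch of the in-place foreach form (my example; a std::vector<at::Tensor> converts implicitly to at::TensorList):

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> params = {at::ones({2}), at::ones({3})};
  // Adds the scalar to every tensor in the list, in place.
  at::_foreach_add_(params, /*scalar=*/1.0);
  return 0;
}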
at::Tensor & ger_out(at::Tensor &out, const at::Tensor &self, const at::Tensor &vec2)
Definition: Functions.h:18207
at::Tensor _to_copy(const at::Tensor &self, at::TensorOptions options={}, bool non_blocking=false, c10::optional< at::MemoryFormat > memory_format=c10::nullopt)
Definition: Functions.h:9145
at::Tensor _upsample_nearest_exact1d(const at::Tensor &input, at::OptionalIntArrayRef output_size, c10::optional< at::ArrayRef< double > > scale_factors)
Definition: Functions.h:14653
at::Tensor fake_quantize_per_tensor_affine(const at::Tensor &self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max)
Definition: Functions.h:9060
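A fake-quantization sketch (my example): values are scaled, rounded, and clamped to [quant_min, quant_max], then mapped back to float.

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({8});
  at::Tensor y = at::fake_quantize_per_tensor_affine(
      x, /*scale=*/0.1, /*zero_point=*/0, /*quant_min=*/-128, /*quant_max=*/127);
  return 0;
}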
at::Tensor avg_pool2d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional< int64_t > divisor_override=c10::nullopt)
Definition: Functions.h:13454
at::Tensor _conv_depthwise2d_symint(const at::Tensor &self, const at::Tensor &weight, at::IntArrayRef kernel_size, const c10::optional< at::Tensor > &bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation)
Definition: Functions.h:16618
at::Tensor upsample_nearest2d_backward_symint(const at::Tensor &grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional< double > scales_h=c10::nullopt, c10::optional< double > scales_w=c10::nullopt)
Definition: Functions.h:16050
at::Tensor mkldnn_adaptive_avg_pool2d(const at::Tensor &self, at::IntArrayRef output_size)
Definition: Functions.h:13241
at::Tensor max_pool2d_with_indices_backward(const at::Tensor &grad_output, const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor &indices)
Definition: Functions.h:13580
::std::tuple< at::Tensor &, at::Tensor & > max_pool3d_with_indices_outf(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor &out, at::Tensor &indices)
Definition: Functions.h:13589
at::Tensor _weight_norm(const at::Tensor &v, const at::Tensor &g, int64_t dim=0)
Definition: Functions.h:8116
at::Tensor & glu_jvp_outf(const at::Tensor &glu, const at::Tensor &x, const at::Tensor &dx, int64_t dim, at::Tensor &out)
Definition: Functions.h:25006
at::Tensor & _sparse_log_softmax_outf(const at::Tensor &self, int64_t dim, bool half_to_float, at::Tensor &out)
Definition: Functions.h:23266
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_group_norm_outf(const at::Tensor &input, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21413
at::Tensor & sparse_sampled_addmm_outf(const at::Tensor &self, const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta, const at::Scalar &alpha, at::Tensor &out)
Definition: Functions.h:8589
at::Tensor & msort_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:11232
at::Tensor bitwise_right_shift(const at::Tensor &self, const at::Tensor &other)
Definition: Functions.h:9787
at::Tensor dstack(at::TensorList tensors)
Definition: Functions.h:7433
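A dstack sketch (my example): inputs are promoted to at least 3-D and concatenated along dim 2.

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::tensor({1.0, 2.0});
  at::Tensor b = at::tensor({3.0, 4.0});
  // 1-D inputs of shape (2,) become (1, 2, 1); the result has shape (1, 2, 2).
  at::Tensor d = at::dstack({a, b});
  return 0;
}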
at::Tensor & unsqueeze_copy_outf(const at::Tensor &self, int64_t dim, at::Tensor &out)
Definition: Functions.h:25752
at::Tensor pow(const at::Tensor &self, const at::Tensor &exponent)
Definition: Functions.h:11353
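An elementwise power sketch (my example):

#include <ATen/ATen.h>

int main() {
  at::Tensor base     = at::tensor({2.0, 3.0});
  at::Tensor exponent = at::tensor({3.0, 2.0});
  at::Tensor r = at::pow(base, exponent);  // elementwise: {8, 9}
  return 0;
}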
at::Tensor & gelu_backward_outf(const at::Tensor &grad_output, const at::Tensor &self, c10::string_view approximate, at::Tensor &grad_input)
Definition: Functions.h:6711
at::Tensor max_pool2d(const at::Tensor &self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false)
Definition: Functions.h:4481
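A pooling sketch (my example; per the signature above, an empty `stride` defaults to `kernel_size`):

#include <ATen/ATen.h>

int main() {
  at::Tensor img = at::randn({1, 3, 32, 32});  // NCHW
  // Non-overlapping 2x2 windows -> output of shape (1, 3, 16, 16).
  at::Tensor pooled = at::max_pool2d(img, /*kernel_size=*/{2, 2});
  return 0;
}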
::std::tuple< at::Tensor &, at::Tensor & > batch_norm_stats_outf(const at::Tensor &input, double eps, at::Tensor &out0, at::Tensor &out1)
Definition: Functions.h:22038
::std::tuple< at::Tensor &, at::Tensor & > max_out(at::Tensor &max, at::Tensor &max_values, const at::Tensor &self, int64_t dim, bool keepdim=false)
Definition: Functions.h:4412
at::Tensor & new_empty_strided_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride)
Definition: Functions.h:20895
at::Tensor layer_norm_symint(const at::Tensor &input, c10::SymIntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight={}, const c10::optional< at::Tensor > &bias={}, double eps=1e-05, bool cudnn_enable=true)
Definition: Functions.h:3840
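A sketch of the SymInt variant with concrete sizes (my example; it assumes c10::SymInt's implicit conversion from int64_t, in which case this behaves like at::layer_norm(x, {64})):

#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor x = at::randn({2, 5, 64});
  std::vector<c10::SymInt> normalized_shape{64};  // normalize over the last dim
  at::Tensor y = at::layer_norm_symint(x, normalized_shape);
  return 0;
}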
at::Tensor & upsample_linear1d_out(at::Tensor &out, const at::Tensor &self, at::IntArrayRef output_size, bool align_corners, c10::optional< double > scales=c10::nullopt)
Definition: Functions.h:14763
at::Tensor & arccos_(at::Tensor &self)
Definition: Functions.h:507
at::Tensor cumprod_backward(const at::Tensor &grad, const at::Tensor &input, int64_t dim, const at::Tensor &output)
Definition: Functions.h:2109
at::Tensor & special_expit_out(at::Tensor &out, const at::Tensor &self)
Definition: Functions.h:17380
at::Tensor & affine_grid_generator_out(at::Tensor &out, const at::Tensor &theta, at::IntArrayRef size, bool align_corners)
Definition: Functions.h:20111
at::Tensor polygamma(int64_t n, const at::Tensor &self)
Definition: Functions.h:10701
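A polygamma sketch (my example; note the order `n` comes first, unlike most unary ops):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::tensor({1.0, 2.0});
  // n = 1 gives the trigamma function, the first derivative of digamma.
  at::Tensor t = at::polygamma(1, x);
  return 0;
}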
at::Tensor & logaddexp_outf(const at::Tensor &self, const at::Tensor &other, at::Tensor &out)
Definition: Functions.h:4110
at::Tensor & special_log_ndtr_outf(const at::Tensor &self, at::Tensor &out)
Definition: Functions.h:17006
at::Tensor & diagonal_scatter_outf(const at::Tensor &self, const at::Tensor &src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor &out)
Definition: Functions.h:22721
at::Tensor & new_empty_strided_outf(const at::Tensor &self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor &out)
Definition: Functions.h:20906
::std::tuple< at::Tensor &, at::Tensor &, at::Tensor & > native_layer_norm_symint_outf(const at::Tensor &input, c10::SymIntArrayRef normalized_shape, const c10::optional< at::Tensor > &weight, const c10::optional< at::Tensor > &bias, double eps, at::Tensor &out0, at::Tensor &out1, at::Tensor &out2)
Definition: Functions.h:21555
constexpr nullopt_t nullopt
Definition: Optional.h:163
constexpr optional< typename std::decay< T >::type > make_optional(T &&v)
Definition: Optional.h:1223