PyTorch
|
#include <TensorBase.h>
Classes | |
struct | unsafe_borrow_t |
Public Types | |
template<typename T > | |
using | hook_return_void_t = std::enable_if_t< std::is_void< typename c10::invoke_result_t< T &, TensorBase > >::value, unsigned > |
template<typename T > | |
using | hook_return_var_t = std::enable_if_t< std::is_same< typename c10::invoke_result_t< T &, TensorBase >, TensorBase >::value, unsigned > |
Public Member Functions | |
TensorBase ()=default | |
TensorBase (c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > tensor_impl) | |
TensorBase (const TensorBase &)=default | |
TensorBase (TensorBase &&)=default | |
int64_t | dim () const |
int64_t | storage_offset () const |
TensorBase | contiguous (MemoryFormat memory_format=MemoryFormat::Contiguous) const |
c10::MaybeOwned< TensorBase > | expect_contiguous (MemoryFormat memory_format=MemoryFormat::Contiguous) const & |
Should be used if *this can reasonably be expected to be contiguous and performance is important. More... | |
c10::MaybeOwned< TensorBase > | expect_contiguous (MemoryFormat memory_format=MemoryFormat::Contiguous) &&=delete |
const TensorBase & | fill_ (const c10::Scalar &scalar) const |
const TensorBase & | zero_ () const |
TensorBase | to (at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional< at::MemoryFormat > memory_format=c10::nullopt) const |
bool | is_complex () const |
bool | is_floating_point () const |
bool | is_signed () const |
c10::SymInt | sym_size (int64_t dim) const |
c10::SymInt | sym_stride (int64_t dim) const |
int64_t | size (int64_t dim) const |
int64_t | stride (int64_t dim) const |
TensorImpl * | unsafeGetTensorImpl () const |
TensorImpl * | unsafeReleaseTensorImpl () |
const c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > & | getIntrusivePtr () const |
c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > | unsafeReleaseIntrusivePtr () |
bool | defined () const |
void | reset () |
TensorBase & | operator= (const TensorBase &x) & |
TensorBase & | operator= (TensorBase &&x) &noexcept |
TensorBase & | operator= (const TensorBase &) &&=delete |
TensorBase & | operator= (TensorBase &&) &&noexcept=delete |
bool | is_same (const TensorBase &other) const noexcept |
size_t | use_count () const noexcept |
size_t | weak_use_count () const noexcept |
std::string | toString () const |
IntArrayRef | sizes () const |
c10::SymIntArrayRef | sym_sizes () const |
c10::SymIntArrayRef | sym_strides () const |
IntArrayRef | strides () const |
c10::optional< DimnameList > | opt_names () const |
DimnameList | names () const |
int64_t | ndimension () const |
bool | is_contiguous (at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const |
bool | is_non_overlapping_and_dense () const |
at::MemoryFormat | suggest_memory_format (bool channels_last_strides_exact_match=false) const |
size_t | nbytes () const |
c10::SymInt | sym_nbytes () const |
int64_t | numel () const |
c10::SymInt | sym_numel () const |
c10::SymInt | sym_storage_offset () const |
size_t | itemsize () const |
int64_t | element_size () const |
DispatchKeySet | key_set () const |
ScalarType | scalar_type () const |
bool | has_storage () const |
const Storage & | storage () const |
bool | is_alias_of (const at::TensorBase &other) const |
bool | _is_zerotensor () const |
void | _set_zero (bool zero) const |
bool | is_conj () const |
void | _set_conj (bool conjugate) const |
bool | is_neg () const |
void | _set_neg (bool negative) const |
Layout | layout () const |
Returns a Tensor 's layout. More... | |
caffe2::TypeMeta | dtype () const |
Returns a Tensor 's dtype (TypeMeta ). More... | |
Device | device () const |
Returns a Tensor 's device. More... | |
int64_t | get_device () const |
Returns a Tensor 's device index. More... | |
bool | is_cpu () const |
Returns if a Tensor has CPU backend. More... | |
bool | is_cuda () const |
Returns if a Tensor has CUDA backend. More... | |
bool | is_ipu () const |
Returns if a Tensor has IPU backend. More... | |
bool | is_xpu () const |
Returns if a Tensor has XPU backend. More... | |
bool | is_xla () const |
Returns if a Tensor has XLA backend. More... | |
bool | is_hpu () const |
Returns if a Tensor has HPU backend. More... | |
bool | is_lazy () const |
Returns if a Tensor has Lazy backend. More... | |
bool | is_hip () const |
Returns if a Tensor has HIP backend. More... | |
bool | is_ve () const |
Returns if a Tensor has VE backend. More... | |
bool | is_sparse () const |
Returns if a Tensor has sparse backend. More... | |
bool | is_sparse_csr () const |
Returns if a Tensor has a sparse CSR backend. More... | |
bool | is_mkldnn () const |
Returns if a Tensor is mkldnn tensor. More... | |
bool | is_mps () const |
Returns if a Tensor is mps tensor. More... | |
bool | is_ort () const |
Returns if a Tensor is ort tensor. More... | |
bool | is_vulkan () const |
Returns if a Tensor is vulkan tensor. More... | |
bool | is_metal () const |
Returns if a Tensor is metal tensor. More... | |
bool | is_quantized () const |
Returns if a Tensor has quantized backend. More... | |
bool | is_meta () const |
Returns if a Tensor is a meta tensor. More... | |
bool | is_inference () const |
Returns if a Tensor is an inference tensor. More... | |
bool | is_nested () const |
QuantizerPtr | quantizer () const |
If a tensor is a quantized tensor, returns its quantizer. TODO: it's not in native_functions.yaml yet as it's not exposed to python. More... | |
bool | has_names () const |
Returns if a Tensor has any dimension names. More... | |
const NamedTensorMeta * | get_named_tensor_meta () const |
Returns a Tensor 's dimension names data structure. More... | |
NamedTensorMeta * | get_named_tensor_meta () |
TensorOptions | options () const |
Returns the TensorOptions corresponding to this Tensor . More... | |
void * | data_ptr () const |
template<typename T > | |
T * | data_ptr () const |
void | print () const |
template<typename T , size_t N> | |
TensorAccessor< T, N > | accessor () const & |
template<typename T , size_t N> | |
TensorAccessor< T, N > | accessor () &&=delete |
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> | |
GenericPackedTensorAccessor< T, N, PtrTraits, index_t > | generic_packed_accessor () const & |
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> | |
GenericPackedTensorAccessor< T, N > | generic_packed_accessor () &&=delete |
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits> | |
PackedTensorAccessor32< T, N, PtrTraits > | packed_accessor32 () const & |
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits> | |
PackedTensorAccessor32< T, N, PtrTraits > | packed_accessor32 () &&=delete |
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits> | |
PackedTensorAccessor64< T, N, PtrTraits > | packed_accessor64 () const & |
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits> | |
PackedTensorAccessor64< T, N, PtrTraits > | packed_accessor64 () &&=delete |
const TensorBase & | set_requires_grad (bool requires_grad) const |
bool | requires_grad () const |
const Tensor & | _fw_grad (uint64_t level) const |
This function returns the forward gradient for this Tensor at the given level. More... | |
void | _set_fw_grad (const TensorBase &new_grad, uint64_t level, bool is_inplace_op) const |
This function can be used to set the value of the forward grad. More... | |
at::TensorBase | tensor_data () const |
NOTE: This is similar to the legacy .data() function on Variable , and is intended to be used from functions that need to access the Variable 's equivalent Tensor (i.e. More... | |
at::TensorBase | variable_data () const |
NOTE: var.variable_data() in C++ has the same semantics as tensor.data in Python, which creates a new Variable that shares the same storage and tensor metadata with the original Variable , but with a completely new autograd history. More... | |
const std::shared_ptr< torch::autograd::Node > & | grad_fn () const |
Gets the gradient function of the Variable . More... | |
template<typename T > | |
hook_return_void_t< T > | register_hook (T &&hook) const |
Registers a backward hook. More... | |
template<typename T > | |
hook_return_var_t< T > | register_hook (T &&hook) const |
void | remove_hook (unsigned pos) const |
Remove hook at given position. More... | |
bool | is_leaf () const |
All Tensors that have requires_grad() which is false will be leaf Tensors by convention. More... | |
int64_t | output_nr () const |
void | set_data (const TensorBase &new_data) const |
TensorBase | data () const |
int64_t | _version () const |
void | retain_grad () const |
Enables this Tensor to have its :attr:grad populated during :func:backward . More... | |
bool | retains_grad () const |
Is true if this Tensor is non-leaf and its :attr:grad is enabled to be populated during :func:backward , false otherwise. More... | |
const TensorBase & | requires_grad_ (bool _requires_grad=true) const |
bool | is_view () const |
Returns true if this Variable is a view of another Variable . More... | |
const TensorBase & | _base () const |
Returns the Variable that this Variable is a view of. More... | |
const std::string & | name () const |
template<typename T > | |
auto | register_hook (T &&hook) const -> TensorBase::hook_return_void_t< T > |
Static Public Member Functions | |
static TensorBase | wrap_tensor_impl (c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > tensor_impl) |
Protected Member Functions | |
TensorBase (unsafe_borrow_t, const TensorBase &rhs) | |
unsigned | _register_hook (std::function< TensorBase(const TensorBase &)> hook) const |
void | enforce_invariants () |
Protected Attributes | |
friend | MaybeOwnedTraits< TensorBase > |
c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > | impl_ |
using at::TensorBase::hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, TensorBase>, TensorBase>::value, unsigned> |
using at::TensorBase::hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, TensorBase> >::value, unsigned> |
|
inlineexplicitprotected |
|
default |
|
inlineexplicit |
|
default |
|
default |
const TensorBase & at::TensorBase::_base | ( | ) | const |
Returns the Variable
that this Variable
is a view of.
If this Variable
is not a view, throw a std::runtime_error
.
|
inline |
This function returns the forward gradient for this Tensor at the given level.
|
inline |
|
protected |
|
inline |
|
inline |
|
inline |
|
inline |
int64_t at::TensorBase::_version | ( | ) | const |
|
delete |
|
inline |
|
inline |
TensorBase at::TensorBase::data | ( | ) | const |
|
inline |
T * at::TensorBase::data_ptr | ( | ) | const |
|
inline |
|
inline |
Returns a Tensor
's device.
|
inline |
|
inline |
Returns a Tensor
's dtype (TypeMeta
).
|
inline |
|
protected |
|
delete |
|
inline |
Should be used if *this can reasonably be expected to be contiguous and performance is important.
Compared to contiguous, it saves a reference count increment/decrement if *this is already contiguous, at the cost in all cases of an extra pointer of stack usage, an extra branch to access, and an extra branch at destruction time.
const TensorBase & at::TensorBase::fill_ | ( | const c10::Scalar & | scalar | ) | const |
|
delete |
|
inline |
|
inline |
Returns a Tensor
's device index.
|
inline |
|
inline |
Returns a Tensor
's dimension names data structure.
|
inline |
const std::shared_ptr< torch::autograd::Node > & at::TensorBase::grad_fn | ( | ) | const |
Gets the gradient function of the Variable
.
If this is a leaf variable, the pointer returned will be null.
For View Variables: Gets the up-to-date grad_fn. If the shared data or base was modified, we re-create the grad_fn to express the up-to-date view relationship between this and the base Variable.
|
inline |
Returns if a Tensor
has any dimension names.
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
Returns if a Tensor
has CPU backend.
|
inline |
Returns if a Tensor
has CUDA backend.
|
inline |
|
inline |
Returns if a Tensor
has HIP backend.
|
inline |
Returns if a Tensor
has HPU backend.
|
inline |
Returns if a Tensor
is an inference tensor.
|
inline |
Returns if a Tensor
has IPU backend.
|
inline |
Returns if a Tensor
has Lazy backend.
bool at::TensorBase::is_leaf | ( | ) | const |
All Tensors that have requires_grad()
which is false
will be leaf Tensors by convention.
For Tensors that have requires_grad()
which is true
, they will be leaf Tensors if they were created by the user. This means that they are not the result of an operation and so grad_fn()
is nullptr
.
Only leaf Tensors will have their grad()
populated during a call to backward()
. To get grad()
populated for non-leaf Tensors, you can use retain_grad()
.
Example:
|
inline |
Returns if a Tensor
is a meta tensor.
Meta tensors can also have other designations.
|
inline |
Returns if a Tensor
is metal tensor.
|
inline |
Returns if a Tensor
is mkldnn tensor.
|
inline |
Returns if a Tensor
is mps tensor.
|
inline |
|
inline |
|
inline |
|
inline |
Returns if a Tensor
is ort tensor.
|
inline |
Returns if a Tensor
has quantized backend.
|
inlinenoexcept |
|
inline |
|
inline |
Returns if a Tensor
has sparse backend.
|
inline |
Returns if a Tensor
has a sparse CSR backend.
|
inline |
Returns if a Tensor
has VE backend.
bool at::TensorBase::is_view | ( | ) | const |
Returns true if this Variable
is a view of another Variable
.
|
inline |
Returns if a Tensor
is vulkan tensor.
|
inline |
Returns if a Tensor
has XLA backend.
|
inline |
Returns if a Tensor
has XPU backend.
|
inline |
|
inline |
|
inline |
Returns a Tensor
's layout.
const std::string & at::TensorBase::name | ( | ) | const |
|
inline |
|
inline |
|
inline |
|
inline |
|
delete |
|
inline |
|
deletenoexcept |
|
inlinenoexcept |
|
inline |
|
inline |
Returns the TensorOptions
corresponding to this Tensor
.
Defined in TensorOptions.h.
int64_t at::TensorBase::output_nr | ( | ) | const |
|
delete |
|
inline |
|
delete |
|
inline |
void at::TensorBase::print | ( | ) | const |
QuantizerPtr at::TensorBase::quantizer | ( | ) | const |
If a tensor is a quantized tensor, returns its quantizer. TODO: it's not in native_functions.yaml yet as it's not exposed to python.
hook_return_void_t< T > at::TensorBase::register_hook | ( | T && | hook | ) | const |
Registers a backward hook.
The hook will be called every time a gradient with respect to the Tensor is computed. The hook should have one of the following signatures:
The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of grad
.
This function returns the index of the hook in the list which can be used to remove hook.
Example:
hook_return_var_t< T > at::TensorBase::register_hook | ( | T && | hook | ) | const |
auto at::TensorBase::register_hook | ( | T && | hook | ) | const -> TensorBase::hook_return_void_t<T> |
void at::TensorBase::remove_hook | ( | unsigned | pos | ) | const |
Remove hook at given position.
|
inline |
const TensorBase & at::TensorBase::requires_grad_ | ( | bool | _requires_grad = true | ) | const |
|
inline |
void at::TensorBase::retain_grad | ( | ) | const |
Enables this Tensor to have its :attr:grad
populated during :func:backward
.
This is a no-op for leaf tensors.
bool at::TensorBase::retains_grad | ( | ) | const |
Is true
if this Tensor is non-leaf and its :attr:grad
is enabled to be populated during :func:backward
, false
otherwise.
|
inline |
void at::TensorBase::set_data | ( | const TensorBase & | new_data | ) | const |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
at::TensorBase at::TensorBase::tensor_data | ( | ) | const |
NOTE: This is similar to the legacy .data()
function on Variable
, and is intended to be used from functions that need to access the Variable
's equivalent Tensor
(i.e.
Tensor
that shares the same storage and tensor metadata with the Variable
).
One notable difference with the legacy .data()
function is that changes to the returned Tensor
's tensor metadata (e.g. sizes / strides / storage / storage_offset) will not update the original Variable
, due to the fact that this function shallow-copies the Variable
's underlying TensorImpl.
TensorBase at::TensorBase::to | ( | at::TensorOptions | options = {} , |
bool | non_blocking = false , |
||
bool | copy = false , |
||
c10::optional< at::MemoryFormat > | memory_format = c10::nullopt |
||
) | const |
std::string at::TensorBase::toString | ( | ) | const |
|
inline |
|
inline |
|
inline |
|
inlinenoexcept |
at::TensorBase at::TensorBase::variable_data | ( | ) | const |
NOTE: var.variable_data()
in C++ has the same semantics as tensor.data
in Python, which creates a new Variable
that shares the same storage and tensor metadata with the original Variable
, but with a completely new autograd history.
NOTE: If we change the tensor metadata (e.g. sizes / strides / storage / storage_offset) of a variable created from var.variable_data()
, those changes will not update the original variable var
. In .variable_data()
, we set allow_tensor_metadata_change_
to false to make such changes explicitly illegal, in order to prevent users from changing metadata of var.variable_data()
and expecting the original variable var
to also be updated.
|
inlinenoexcept |
|
inlinestatic |
const TensorBase & at::TensorBase::zero_ | ( | ) | const |
|
protected |
|
protected |