PyTorch
Classes | Public Types | Public Member Functions | Static Public Member Functions | Protected Member Functions | Protected Attributes | List of all members
at::TensorBase Class Reference

#include <TensorBase.h>

Inheritance diagram for at::TensorBase:
at::Tensor

Classes

struct  unsafe_borrow_t
 

Public Types

template<typename T >
using hook_return_void_t = std::enable_if_t< std::is_void< typename c10::invoke_result_t< T &, TensorBase > >::value, unsigned >
 
template<typename T >
using hook_return_var_t = std::enable_if_t< std::is_same< typename c10::invoke_result_t< T &, TensorBase >, TensorBase >::value, unsigned >
 

Public Member Functions

 TensorBase ()=default
 
 TensorBase (c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > tensor_impl)
 
 TensorBase (const TensorBase &)=default
 
 TensorBase (TensorBase &&)=default
 
int64_t dim () const
 
int64_t storage_offset () const
 
TensorBase contiguous (MemoryFormat memory_format=MemoryFormat::Contiguous) const
 
c10::MaybeOwned< TensorBase > expect_contiguous (MemoryFormat memory_format=MemoryFormat::Contiguous) const &
 Should be used if *this can reasonably be expected to be contiguous and performance is important. More...
 
c10::MaybeOwned< TensorBase > expect_contiguous (MemoryFormat memory_format=MemoryFormat::Contiguous) &&=delete
 
const TensorBase & fill_ (const c10::Scalar &scalar) const
 
const TensorBase & zero_ () const
 
TensorBase to (at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional< at::MemoryFormat > memory_format=c10::nullopt) const
 
bool is_complex () const
 
bool is_floating_point () const
 
bool is_signed () const
 
c10::SymInt sym_size (int64_t dim) const
 
c10::SymInt sym_stride (int64_t dim) const
 
int64_t size (int64_t dim) const
 
int64_t stride (int64_t dim) const
 
TensorImpl * unsafeGetTensorImpl () const
 
TensorImpl * unsafeReleaseTensorImpl ()
 
const c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > & getIntrusivePtr () const
 
c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > unsafeReleaseIntrusivePtr ()
 
bool defined () const
 
void reset ()
 
TensorBase & operator= (const TensorBase &x) &
 
TensorBase & operator= (TensorBase &&x) & noexcept
 
TensorBase & operator= (const TensorBase &) &&=delete
 
TensorBase & operator= (TensorBase &&) && noexcept=delete
 
bool is_same (const TensorBase &other) const noexcept
 
size_t use_count () const noexcept
 
size_t weak_use_count () const noexcept
 
std::string toString () const
 
IntArrayRef sizes () const
 
c10::SymIntArrayRef sym_sizes () const
 
c10::SymIntArrayRef sym_strides () const
 
IntArrayRef strides () const
 
c10::optional< DimnameList > opt_names () const
 
DimnameList names () const
 
int64_t ndimension () const
 
bool is_contiguous (at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const
 
bool is_non_overlapping_and_dense () const
 
at::MemoryFormat suggest_memory_format (bool channels_last_strides_exact_match=false) const
 
size_t nbytes () const
 
c10::SymInt sym_nbytes () const
 
int64_t numel () const
 
c10::SymInt sym_numel () const
 
c10::SymInt sym_storage_offset () const
 
size_t itemsize () const
 
int64_t element_size () const
 
DispatchKeySet key_set () const
 
ScalarType scalar_type () const
 
bool has_storage () const
 
const Storage & storage () const
 
bool is_alias_of (const at::TensorBase &other) const
 
bool _is_zerotensor () const
 
void _set_zero (bool zero) const
 
bool is_conj () const
 
void _set_conj (bool conjugate) const
 
bool is_neg () const
 
void _set_neg (bool negative) const
 
Layout layout () const
 Returns a Tensor's layout. More...
 
caffe2::TypeMeta dtype () const
 Returns a Tensor's dtype (TypeMeta). More...
 
Device device () const
 Returns a Tensor's device. More...
 
int64_t get_device () const
 Returns a Tensor's device index. More...
 
bool is_cpu () const
 Returns if a Tensor has CPU backend. More...
 
bool is_cuda () const
 Returns if a Tensor has CUDA backend. More...
 
bool is_ipu () const
 Returns if a Tensor has IPU backend. More...
 
bool is_xpu () const
 Returns if a Tensor has XPU backend. More...
 
bool is_xla () const
 Returns if a Tensor has XLA backend. More...
 
bool is_hpu () const
 Returns if a Tensor has HPU backend. More...
 
bool is_lazy () const
 Returns if a Tensor has Lazy backend. More...
 
bool is_hip () const
 Returns if a Tensor has HIP backend. More...
 
bool is_ve () const
 Returns if a Tensor has VE backend. More...
 
bool is_sparse () const
 Returns if a Tensor has sparse backend. More...
 
bool is_sparse_csr () const
 Returns if a Tensor has a sparse CSR backend. More...
 
bool is_mkldnn () const
 Returns if a Tensor is an mkldnn tensor. More...
 
bool is_mps () const
 Returns if a Tensor is an MPS tensor. More...
 
bool is_ort () const
 Returns if a Tensor is an ORT tensor. More...
 
bool is_vulkan () const
 Returns if a Tensor is a Vulkan tensor. More...
 
bool is_metal () const
 Returns if a Tensor is a Metal tensor. More...
 
bool is_quantized () const
 Returns if a Tensor has quantized backend. More...
 
bool is_meta () const
 Returns if a Tensor is a meta tensor. More...
 
bool is_inference () const
 Returns if a Tensor is an inference tensor. More...
 
bool is_nested () const
 
QuantizerPtr quantizer () const
 If a tensor is a quantized tensor, returns its quantizer. TODO: it's not in native_functions.yaml yet, as it's not exposed to Python. More...
 
bool has_names () const
 Returns if a Tensor has any dimension names. More...
 
const NamedTensorMeta * get_named_tensor_meta () const
 Returns a Tensor's dimension names data structure. More...
 
NamedTensorMeta * get_named_tensor_meta ()
 
TensorOptions options () const
 Returns the TensorOptions corresponding to this Tensor. More...
 
void * data_ptr () const
 
template<typename T >
T * data_ptr () const
 
void print () const
 
template<typename T , size_t N>
TensorAccessor< T, N > accessor () const &
 
template<typename T , size_t N>
TensorAccessor< T, N > accessor () &&=delete
 
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor< T, N, PtrTraits, index_t > generic_packed_accessor () const &
 
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor< T, N > generic_packed_accessor () &&=delete
 
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32< T, N, PtrTraits > packed_accessor32 () const &
 
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32< T, N, PtrTraits > packed_accessor32 () &&=delete
 
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64< T, N, PtrTraits > packed_accessor64 () const &
 
template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64< T, N, PtrTraits > packed_accessor64 () &&=delete
 
const TensorBase & set_requires_grad (bool requires_grad) const
 
bool requires_grad () const
 
const Tensor & _fw_grad (uint64_t level) const
 This function returns the forward gradient for this Tensor at the given level. More...
 
void _set_fw_grad (const TensorBase &new_grad, uint64_t level, bool is_inplace_op) const
 This function can be used to set the value of the forward grad. More...
 
at::TensorBase tensor_data () const
 NOTE: This is similar to the legacy .data() function on Variable, and is intended to be used from functions that need to access the Variable's equivalent Tensor (i.e. More...
 
at::TensorBase variable_data () const
 NOTE: var.variable_data() in C++ has the same semantics as tensor.data in Python, which creates a new Variable that shares the same storage and tensor metadata with the original Variable, but with a completely new autograd history. More...
 
const std::shared_ptr< torch::autograd::Node > & grad_fn () const
 Gets the gradient function of the Variable. More...
 
template<typename T >
hook_return_void_t< T > register_hook (T &&hook) const
 Registers a backward hook. More...
 
template<typename T >
hook_return_var_t< T > register_hook (T &&hook) const
 
void remove_hook (unsigned pos) const
 Remove hook at given position. More...
 
bool is_leaf () const
 All Tensors that have requires_grad() == false will be leaf Tensors by convention. More...
 
int64_t output_nr () const
 
void set_data (const TensorBase &new_data) const
 
TensorBase data () const
 
int64_t _version () const
 
void retain_grad () const
 Enables this Tensor to have its grad populated during backward(). More...
 
bool retains_grad () const
 Is true if this Tensor is non-leaf and its grad is enabled to be populated during backward(), false otherwise. More...
 
const TensorBase & requires_grad_ (bool _requires_grad=true) const
 
bool is_view () const
 Returns true if this Variable is a view of another Variable. More...
 
const TensorBase & _base () const
 Returns the Variable that this Variable is a view of. More...
 
const std::string & name () const
 
template<typename T >
auto register_hook (T &&hook) const -> TensorBase::hook_return_void_t< T >
 

Static Public Member Functions

static TensorBase wrap_tensor_impl (c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > tensor_impl)
 

Protected Member Functions

 TensorBase (unsafe_borrow_t, const TensorBase &rhs)
 
unsigned _register_hook (std::function< TensorBase(const TensorBase &)> hook) const
 
void enforce_invariants ()
 

Protected Attributes

friend MaybeOwnedTraits< TensorBase >
 
c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > impl_
 

Member Typedef Documentation

◆ hook_return_var_t

template<typename T >
using at::TensorBase::hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, TensorBase>, TensorBase>::value, unsigned>

◆ hook_return_void_t

template<typename T >
using at::TensorBase::hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, TensorBase> >::value, unsigned>

Constructor & Destructor Documentation

◆ TensorBase() [1/5]

at::TensorBase::TensorBase ( unsafe_borrow_t  ,
const TensorBase &  rhs 
)
inline explicit protected

◆ TensorBase() [2/5]

at::TensorBase::TensorBase ( )
default

◆ TensorBase() [3/5]

at::TensorBase::TensorBase ( c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl >  tensor_impl)
inline explicit

◆ TensorBase() [4/5]

at::TensorBase::TensorBase ( const TensorBase &  )
default

◆ TensorBase() [5/5]

at::TensorBase::TensorBase ( TensorBase &&  )
default

Member Function Documentation

◆ _base()

const TensorBase & at::TensorBase::_base ( ) const

Returns the Variable that this Variable is a view of.

If this Variable is not a view, throw a std::runtime_error.
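
For illustration, a minimal sketch (assuming <torch/torch.h> and <iostream> are included; slice comes from the derived at::Tensor interface):

auto base = torch::rand({4, 4});
auto view = base.slice(/*dim=*/0, /*start=*/0, /*end=*/2); // creates a view of `base`
std::cout << view.is_view() << std::endl;             // prints `1`
std::cout << view._base().is_same(base) << std::endl; // prints `1`
// `base` itself is not a view, so `base._base()` would throw std::runtime_error.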

◆ _fw_grad()

const Tensor & at::TensorBase::_fw_grad ( uint64_t  level) const
inline

This function returns the forward gradient for this Tensor at the given level.

◆ _is_zerotensor()

bool at::TensorBase::_is_zerotensor ( ) const
inline

◆ _register_hook()

unsigned at::TensorBase::_register_hook ( std::function< TensorBase(const TensorBase &)>  hook) const
protected

◆ _set_conj()

void at::TensorBase::_set_conj ( bool  conjugate) const
inline

◆ _set_fw_grad()

void at::TensorBase::_set_fw_grad ( const TensorBase &  new_grad,
uint64_t  level,
bool  is_inplace_op 
) const
inline

This function can be used to set the value of the forward grad.

Note that the given new_grad might not be used directly if it has different metadata (size / stride / storage offset) compared to this Tensor. In that case, the content of new_grad will be copied into a fresh Tensor.

◆ _set_neg()

void at::TensorBase::_set_neg ( bool  negative) const
inline

◆ _set_zero()

void at::TensorBase::_set_zero ( bool  zero) const
inline

◆ _version()

int64_t at::TensorBase::_version ( ) const

◆ accessor() [1/2]

template<typename T , size_t N>
TensorAccessor< T, N > at::TensorBase::accessor ( ) &&
delete

◆ accessor() [2/2]

template<typename T , size_t N>
TensorAccessor< T, N > at::TensorBase::accessor ( ) const &
inline
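
For illustration, a hedged sketch of direct CPU element access (assumes the dtype and rank are known at compile time; accessor checks both at runtime):

auto t = torch::zeros({3, 4});
auto a = t.accessor<float, 2>(); // CPU tensors only
for (int64_t i = 0; i < a.size(0); i++) {
  for (int64_t j = 0; j < a.size(1); j++) {
    a[i][j] = 1.0f; // element access without per-call dispatch overhead
  }
}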

◆ contiguous()

TensorBase at::TensorBase::contiguous ( MemoryFormat  memory_format = MemoryFormat::Contiguous) const
inline

◆ data()

TensorBase at::TensorBase::data ( ) const

◆ data_ptr() [1/2]

void * at::TensorBase::data_ptr ( ) const
inline

◆ data_ptr() [2/2]

template<typename T >
T * at::TensorBase::data_ptr ( ) const
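
For illustration, a minimal sketch (assumes a contiguous CPU float tensor; the typed overload throws if T does not match the tensor's dtype):

auto t = torch::arange(6, torch::kFloat).reshape({2, 3});
float* p = t.data_ptr<float>(); // typed pointer into the tensor's storage
p[0] = 42.0f;                   // writes through to the tensor itself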

◆ defined()

bool at::TensorBase::defined ( ) const
inline

◆ device()

Device at::TensorBase::device ( ) const
inline

Returns a Tensor's device.

◆ dim()

int64_t at::TensorBase::dim ( ) const
inline

◆ dtype()

caffe2::TypeMeta at::TensorBase::dtype ( ) const
inline

Returns a Tensor's dtype (TypeMeta).

◆ element_size()

int64_t at::TensorBase::element_size ( ) const
inline

◆ enforce_invariants()

void at::TensorBase::enforce_invariants ( )
protected

◆ expect_contiguous() [1/2]

c10::MaybeOwned< TensorBase > at::TensorBase::expect_contiguous ( MemoryFormat  memory_format = MemoryFormat::Contiguous) &&
delete

◆ expect_contiguous() [2/2]

c10::MaybeOwned< TensorBase > at::TensorBase::expect_contiguous ( MemoryFormat  memory_format = MemoryFormat::Contiguous) const &
inline

Should be used if *this can reasonably be expected to be contiguous and performance is important.

Compared to contiguous, it saves a reference count increment/decrement if *this is already contiguous, at the cost in all cases of an extra pointer of stack usage, an extra branch to access, and an extra branch at destruction time.
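
For illustration, a hedged sketch of the intended pattern (sum_all is a hypothetical helper, not part of the API):

float sum_all(const at::TensorBase& t) {
  // Borrows `t` if it is already contiguous; otherwise owns a contiguous copy.
  c10::MaybeOwned<at::TensorBase> c = t.expect_contiguous();
  const float* p = c->data_ptr<float>();
  float acc = 0.0f;
  for (int64_t i = 0; i < c->numel(); ++i) acc += p[i];
  return acc;
}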

◆ fill_()

const TensorBase & at::TensorBase::fill_ ( const c10::Scalar &  scalar) const

◆ generic_packed_accessor() [1/2]

template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor< T, N > at::TensorBase::generic_packed_accessor ( ) &&
delete

◆ generic_packed_accessor() [2/2]

template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor< T, N, PtrTraits, index_t > at::TensorBase::generic_packed_accessor ( ) const &
inline

◆ get_device()

int64_t at::TensorBase::get_device ( ) const
inline

Returns a Tensor's device index.

◆ get_named_tensor_meta() [1/2]

NamedTensorMeta * at::TensorBase::get_named_tensor_meta ( )
inline

◆ get_named_tensor_meta() [2/2]

const NamedTensorMeta * at::TensorBase::get_named_tensor_meta ( ) const
inline

Returns a Tensor's dimension names data structure.

◆ getIntrusivePtr()

const c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > & at::TensorBase::getIntrusivePtr ( ) const
inline

◆ grad_fn()

const std::shared_ptr< torch::autograd::Node > & at::TensorBase::grad_fn ( ) const

Gets the gradient function of the Variable.

If this is a leaf variable, the pointer returned will be null.

For View Variables: Gets the up-to-date grad_fn. If the shared data or base was modified, we re-create the grad_fn to express the up-to-date view relationship between this and the base Variable.
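
For illustration, a minimal sketch distinguishing leaf and non-leaf Variables:

auto a = torch::rand({3}, torch::requires_grad());
auto b = a * 2;
std::cout << (a.grad_fn() == nullptr) << std::endl; // prints `1`: leaf Variable
std::cout << (b.grad_fn() != nullptr) << std::endl; // prints `1`: created by an op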

◆ has_names()

bool at::TensorBase::has_names ( ) const
inline

Returns if a Tensor has any dimension names.

◆ has_storage()

bool at::TensorBase::has_storage ( ) const
inline

◆ is_alias_of()

bool at::TensorBase::is_alias_of ( const at::TensorBase &  other) const
inline

◆ is_complex()

bool at::TensorBase::is_complex ( ) const
inline

◆ is_conj()

bool at::TensorBase::is_conj ( ) const
inline

◆ is_contiguous()

bool at::TensorBase::is_contiguous ( at::MemoryFormat  memory_format = at::MemoryFormat::Contiguous) const
inline

◆ is_cpu()

bool at::TensorBase::is_cpu ( ) const
inline

Returns if a Tensor has CPU backend.

◆ is_cuda()

bool at::TensorBase::is_cuda ( ) const
inline

Returns if a Tensor has CUDA backend.

◆ is_floating_point()

bool at::TensorBase::is_floating_point ( ) const
inline

◆ is_hip()

bool at::TensorBase::is_hip ( ) const
inline

Returns if a Tensor has HIP backend.

◆ is_hpu()

bool at::TensorBase::is_hpu ( ) const
inline

Returns if a Tensor has HPU backend.

◆ is_inference()

bool at::TensorBase::is_inference ( ) const
inline

Returns if a Tensor is an inference tensor.

◆ is_ipu()

bool at::TensorBase::is_ipu ( ) const
inline

Returns if a Tensor has IPU backend.

◆ is_lazy()

bool at::TensorBase::is_lazy ( ) const
inline

Returns if a Tensor has Lazy backend.

◆ is_leaf()

bool at::TensorBase::is_leaf ( ) const

All Tensors that have requires_grad() == false will be leaf Tensors by convention.

Tensors that have requires_grad() == true will be leaf Tensors if they were created by the user, meaning they are not the result of an operation and so grad_fn() is nullptr.

Only leaf Tensors will have their grad() populated during a call to backward(). To get grad() populated for non-leaf Tensors, you can use retain_grad().

Example:

auto a = torch::rand(10, torch::requires_grad());
std::cout << a.is_leaf() << std::endl; // prints `true`
auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
std::cout << b.is_leaf() << std::endl; // prints `false`
// b was created by the operation that cast a cpu Tensor into a cuda Tensor
auto c = torch::rand(10, torch::requires_grad()) + 2;
std::cout << c.is_leaf() << std::endl; // prints `false`
// c was created by the addition operation
auto d = torch::rand(10).cuda();
std::cout << d.is_leaf() << std::endl; // prints `true`
// d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
auto e = torch::rand(10).cuda().requires_grad_();
std::cout << e.is_leaf() << std::endl; // prints `true`
// e requires gradients and has no operations creating it
auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
std::cout << f.is_leaf() << std::endl; // prints `true`
// f requires grad, has no operation creating it

◆ is_meta()

bool at::TensorBase::is_meta ( ) const
inline

Returns if a Tensor is a meta tensor.

Meta tensors can also have other designations.

◆ is_metal()

bool at::TensorBase::is_metal ( ) const
inline

Returns if a Tensor is a Metal tensor.

◆ is_mkldnn()

bool at::TensorBase::is_mkldnn ( ) const
inline

Returns if a Tensor is an mkldnn tensor.

◆ is_mps()

bool at::TensorBase::is_mps ( ) const
inline

Returns if a Tensor is an MPS tensor.

◆ is_neg()

bool at::TensorBase::is_neg ( ) const
inline

◆ is_nested()

bool at::TensorBase::is_nested ( ) const
inline

◆ is_non_overlapping_and_dense()

bool at::TensorBase::is_non_overlapping_and_dense ( ) const
inline

◆ is_ort()

bool at::TensorBase::is_ort ( ) const
inline

Returns if a Tensor is an ORT tensor.

◆ is_quantized()

bool at::TensorBase::is_quantized ( ) const
inline

Returns if a Tensor has quantized backend.

◆ is_same()

bool at::TensorBase::is_same ( const TensorBase &  other) const
inline noexcept

◆ is_signed()

bool at::TensorBase::is_signed ( ) const
inline

◆ is_sparse()

bool at::TensorBase::is_sparse ( ) const
inline

Returns if a Tensor has sparse backend.

◆ is_sparse_csr()

bool at::TensorBase::is_sparse_csr ( ) const
inline

Returns if a Tensor has a sparse CSR backend.

◆ is_ve()

bool at::TensorBase::is_ve ( ) const
inline

Returns if a Tensor has VE backend.

◆ is_view()

bool at::TensorBase::is_view ( ) const

Returns true if this Variable is a view of another Variable.

◆ is_vulkan()

bool at::TensorBase::is_vulkan ( ) const
inline

Returns if a Tensor is a Vulkan tensor.

◆ is_xla()

bool at::TensorBase::is_xla ( ) const
inline

Returns if a Tensor has XLA backend.

◆ is_xpu()

bool at::TensorBase::is_xpu ( ) const
inline

Returns if a Tensor has XPU backend.

◆ itemsize()

size_t at::TensorBase::itemsize ( ) const
inline

◆ key_set()

DispatchKeySet at::TensorBase::key_set ( ) const
inline

◆ layout()

Layout at::TensorBase::layout ( ) const
inline

Returns a Tensor's layout.

◆ name()

const std::string & at::TensorBase::name ( ) const

◆ names()

DimnameList at::TensorBase::names ( ) const
inline

◆ nbytes()

size_t at::TensorBase::nbytes ( ) const
inline

◆ ndimension()

int64_t at::TensorBase::ndimension ( ) const
inline

◆ numel()

int64_t at::TensorBase::numel ( ) const
inline

◆ operator=() [1/4]

TensorBase & at::TensorBase::operator= ( const TensorBase &  ) &&
delete

◆ operator=() [2/4]

TensorBase & at::TensorBase::operator= ( const TensorBase &  x) &
inline

◆ operator=() [3/4]

TensorBase & at::TensorBase::operator= ( TensorBase &&  ) &&
delete noexcept

◆ operator=() [4/4]

TensorBase & at::TensorBase::operator= ( TensorBase &&  x) &
inline noexcept

◆ opt_names()

c10::optional< DimnameList > at::TensorBase::opt_names ( ) const
inline

◆ options()

TensorOptions at::TensorBase::options ( ) const
inline

Returns the TensorOptions corresponding to this Tensor.

Defined in TensorOptions.h.
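
For illustration, a common use is creating a new tensor that matches an existing tensor's dtype, device, and layout:

auto t = torch::rand({2, 2}, torch::dtype(torch::kFloat).device(torch::kCPU));
auto z = torch::zeros({4}, t.options()); // same dtype, device, and layout as `t`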

◆ output_nr()

int64_t at::TensorBase::output_nr ( ) const

◆ packed_accessor32() [1/2]

template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32< T, N, PtrTraits > at::TensorBase::packed_accessor32 ( ) &&
delete

◆ packed_accessor32() [2/2]

template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32< T, N, PtrTraits > at::TensorBase::packed_accessor32 ( ) const &
inline

◆ packed_accessor64() [1/2]

template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64< T, N, PtrTraits > at::TensorBase::packed_accessor64 ( ) &&
delete

◆ packed_accessor64() [2/2]

template<typename T , size_t N, template< typename U > class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64< T, N, PtrTraits > at::TensorBase::packed_accessor64 ( ) const &
inline

◆ print()

void at::TensorBase::print ( ) const

◆ quantizer()

QuantizerPtr at::TensorBase::quantizer ( ) const

If a tensor is a quantized tensor, returns its quantizer. TODO: it's not in native_functions.yaml yet, as it's not exposed to Python.

◆ register_hook() [1/3]

template<typename T >
hook_return_void_t< T > at::TensorBase::register_hook ( T &&  hook) const

Registers a backward hook.

The hook will be called every time a gradient with respect to the Tensor is computed. The hook should have one of the following signatures:

hook(TensorBase grad) -> TensorBase
hook(TensorBase grad) -> void

The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of grad.

This function returns the index of the hook in the list, which can be used to remove the hook.

Example:

auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
v.backward(torch::tensor({1., 2., 3.}));
// This prints:
// ```
// 2
// 4
// 6
// [ CPUFloatType{3} ]
// ```
std::cout << v.grad() << std::endl;
v.remove_hook(h); // removes the hook

◆ register_hook() [2/3]

template<typename T >
hook_return_var_t< T > at::TensorBase::register_hook ( T &&  hook) const

◆ register_hook() [3/3]

template<typename T >
auto at::TensorBase::register_hook ( T &&  hook) const -> TensorBase::hook_return_void_t<T>

◆ remove_hook()

void at::TensorBase::remove_hook ( unsigned  pos) const

Remove hook at given position.

◆ requires_grad()

bool at::TensorBase::requires_grad ( ) const
inline

◆ requires_grad_()

const TensorBase & at::TensorBase::requires_grad_ ( bool  _requires_grad = true) const

◆ reset()

void at::TensorBase::reset ( )
inline

◆ retain_grad()

void at::TensorBase::retain_grad ( ) const

Enables this Tensor to have its grad populated during backward().

This is a no-op for leaf tensors.
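
For illustration, a minimal sketch of retaining grad() on a non-leaf Tensor:

auto x = torch::rand({3}, torch::requires_grad());
auto h = x * 2; // non-leaf: grad() would normally not be populated
h.retain_grad();
h.sum().backward();
std::cout << h.grad() << std::endl; // populated; all ones for this graph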

◆ retains_grad()

bool at::TensorBase::retains_grad ( ) const

Is true if this Tensor is non-leaf and its grad is enabled to be populated during backward(), false otherwise.

◆ scalar_type()

ScalarType at::TensorBase::scalar_type ( ) const
inline

◆ set_data()

void at::TensorBase::set_data ( const TensorBase &  new_data) const

◆ set_requires_grad()

const TensorBase & at::TensorBase::set_requires_grad ( bool  requires_grad) const
inline

◆ size()

int64_t at::TensorBase::size ( int64_t  dim) const
inline

◆ sizes()

IntArrayRef at::TensorBase::sizes ( ) const
inline

◆ storage()

const Storage & at::TensorBase::storage ( ) const
inline

◆ storage_offset()

int64_t at::TensorBase::storage_offset ( ) const
inline

◆ stride()

int64_t at::TensorBase::stride ( int64_t  dim) const
inline

◆ strides()

IntArrayRef at::TensorBase::strides ( ) const
inline

◆ suggest_memory_format()

at::MemoryFormat at::TensorBase::suggest_memory_format ( bool  channels_last_strides_exact_match = false) const
inline

◆ sym_nbytes()

c10::SymInt at::TensorBase::sym_nbytes ( ) const
inline

◆ sym_numel()

c10::SymInt at::TensorBase::sym_numel ( ) const
inline

◆ sym_size()

c10::SymInt at::TensorBase::sym_size ( int64_t  dim) const
inline

◆ sym_sizes()

c10::SymIntArrayRef at::TensorBase::sym_sizes ( ) const
inline

◆ sym_storage_offset()

c10::SymInt at::TensorBase::sym_storage_offset ( ) const
inline

◆ sym_stride()

c10::SymInt at::TensorBase::sym_stride ( int64_t  dim) const
inline

◆ sym_strides()

c10::SymIntArrayRef at::TensorBase::sym_strides ( ) const
inline

◆ tensor_data()

at::TensorBase at::TensorBase::tensor_data ( ) const

NOTE: This is similar to the legacy .data() function on Variable, and is intended to be used from functions that need to access the Variable's equivalent Tensor (i.e. the Tensor that shares the same storage and tensor metadata with the Variable).

One notable difference with the legacy .data() function is that changes to the returned Tensor's tensor metadata (e.g. sizes / strides / storage / storage_offset) will not update the original Variable, due to the fact that this function shallow-copies the Variable's underlying TensorImpl.
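
For illustration, a hedged sketch (storage is shared, but the returned Tensor has its own TensorImpl):

auto var = torch::rand({2, 2}, torch::requires_grad());
auto t = var.tensor_data();
std::cout << t.is_alias_of(var) << std::endl; // prints `1`: same storage
// `t` has its own TensorImpl, so metadata changes to `t` (e.g. resizing)
// do not update `var`.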

◆ to()

TensorBase at::TensorBase::to ( at::TensorOptions  options = {},
bool  non_blocking = false,
bool  copy = false,
c10::optional< at::MemoryFormat >  memory_format = c10::nullopt 
) const
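
For illustration, a minimal sketch (with copy=false, to may return *this when no conversion is needed):

auto t = torch::rand({2, 3});
auto d = t.to(torch::dtype(torch::kDouble),
              /*non_blocking=*/false, /*copy=*/false); // dtype conversion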

◆ toString()

std::string at::TensorBase::toString ( ) const

◆ unsafeGetTensorImpl()

TensorImpl * at::TensorBase::unsafeGetTensorImpl ( ) const
inline

◆ unsafeReleaseIntrusivePtr()

c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl > at::TensorBase::unsafeReleaseIntrusivePtr ( )
inline

◆ unsafeReleaseTensorImpl()

TensorImpl * at::TensorBase::unsafeReleaseTensorImpl ( )
inline

◆ use_count()

size_t at::TensorBase::use_count ( ) const
inline noexcept

◆ variable_data()

at::TensorBase at::TensorBase::variable_data ( ) const

NOTE: var.variable_data() in C++ has the same semantics as tensor.data in Python: it creates a new Variable that shares the same storage and tensor metadata with the original Variable, but with a completely new autograd history.

NOTE: If we change the tensor metadata (e.g. sizes / strides / storage / storage_offset) of a variable created from var.variable_data(), those changes will not update the original variable var. In .variable_data(), we set allow_tensor_metadata_change_ to false to make such changes explicitly illegal, in order to prevent users from changing metadata of var.variable_data() and expecting the original variable var to also be updated.
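
For illustration, a minimal sketch:

auto var = torch::rand({2}, torch::requires_grad());
auto v = var.variable_data();                 // shares storage, fresh autograd history
std::cout << v.requires_grad() << std::endl;  // prints `0`
std::cout << v.is_alias_of(var) << std::endl; // prints `1`: same storage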

◆ weak_use_count()

size_t at::TensorBase::weak_use_count ( ) const
inline noexcept

◆ wrap_tensor_impl()

static TensorBase at::TensorBase::wrap_tensor_impl ( c10::intrusive_ptr< TensorImpl, UndefinedTensorImpl >  tensor_impl)
inline static

◆ zero_()

const TensorBase & at::TensorBase::zero_ ( ) const

Member Data Documentation

◆ impl_

c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> at::TensorBase::impl_
protected

◆ MaybeOwnedTraits< TensorBase >

friend at::TensorBase::MaybeOwnedTraits< TensorBase >
protected

The documentation for this class was generated from the following file:

TensorBase.h