PyTorch API Documentation (Doxygen)
Functions
at::cuda Namespace Reference

Functions

int64_t getNumGPUs ()
 DEPRECATED: use device_count() instead.
 
bool is_available ()
 CUDA is available if we compiled with CUDA, and there are one or more devices.
 
cudaDeviceProp * getCurrentDeviceProperties ()
 
int warp_size ()
 
cudaDeviceProp * getDeviceProperties (int64_t device)
 
bool canDeviceAccessPeer (int64_t device, int64_t peer_device)
 
Allocator * getCUDADeviceAllocator ()
 
cusparseHandle_t getCurrentCUDASparseHandle ()
 
cublasHandle_t getCurrentCUDABlasHandle ()
 
void clearCublasWorkspaces ()
 

Function Documentation

◆ canDeviceAccessPeer()

bool at::cuda::canDeviceAccessPeer ( int64_t  device,
int64_t  peer_device 
)

◆ clearCublasWorkspaces()

void at::cuda::clearCublasWorkspaces ( )

◆ getCUDADeviceAllocator()

Allocator * at::cuda::getCUDADeviceAllocator ( )

◆ getCurrentCUDABlasHandle()

cublasHandle_t at::cuda::getCurrentCUDABlasHandle ( )

◆ getCurrentCUDASparseHandle()

cusparseHandle_t at::cuda::getCurrentCUDASparseHandle ( )

◆ getCurrentDeviceProperties()

cudaDeviceProp * at::cuda::getCurrentDeviceProperties ( )

◆ getDeviceProperties()

cudaDeviceProp * at::cuda::getDeviceProperties ( int64_t  device)

◆ getNumGPUs()

int64_t at::cuda::getNumGPUs ( )
inline

DEPRECATED: use device_count() instead.

◆ is_available()

bool at::cuda::is_available ( )
inline

CUDA is available if we compiled with CUDA, and there are one or more devices.

If we compiled with CUDA but there is a driver problem, etc., this function will report CUDA is not available (rather than raise an error).

◆ warp_size()

int at::cuda::warp_size ( )