PyTorch

Functions
int64_t            getNumGPUs ()
                       DEPRECATED: use device_count() instead.
bool               is_available ()
                       CUDA is available if we compiled with CUDA, and there are one or more devices.
cudaDeviceProp *   getCurrentDeviceProperties ()
int                warp_size ()
cudaDeviceProp *   getDeviceProperties (int64_t device)
bool               canDeviceAccessPeer (int64_t device, int64_t peer_device)
Allocator *        getCUDADeviceAllocator ()
cusparseHandle_t   getCurrentCUDASparseHandle ()
cublasHandle_t     getCurrentCUDABlasHandle ()
void               clearCublasWorkspaces ()
Function Documentation

bool at::cuda::canDeviceAccessPeer (int64_t device, int64_t peer_device)
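Example (a minimal sketch, not part of the header): checking whether GPU 0 can read GPU 1's memory directly before relying on peer-to-peer transfers. The helper name report_peer_access and the use of at::cuda::device_count() for the guard are illustrative assumptions.

    #include <ATen/cuda/CUDAContext.h>
    #include <cstdio>

    // Illustrative helper: report whether GPU 0 can access GPU 1's memory directly.
    void report_peer_access() {
      if (at::cuda::device_count() < 2) {
        std::printf("fewer than two GPUs visible; peer access not applicable\n");
        return;
      }
      bool p2p = at::cuda::canDeviceAccessPeer(/*device=*/0, /*peer_device=*/1);
      std::printf("GPU 0 %s access GPU 1 directly\n", p2p ? "can" : "cannot");
    }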
void at::cuda::clearCublasWorkspaces ()
Allocator * at::cuda::getCUDADeviceAllocator ()
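Example (a sketch, assuming Allocator follows the usual c10::Allocator interface where allocate(size_t) returns an owning at::DataPtr): obtaining a raw device buffer from the same caching allocator that backs CUDA tensors. The function name is illustrative.

    #include <ATen/cuda/CUDAContext.h>
    #include <c10/core/Allocator.h>

    // Illustrative: allocate a raw device buffer from ATen's CUDA allocator.
    void use_raw_cuda_buffer(size_t nbytes) {
      at::Allocator* allocator = at::cuda::getCUDADeviceAllocator();
      at::DataPtr buf = allocator->allocate(nbytes);  // released when buf goes out of scope
      void* device_ptr = buf.get();                   // usable with kernels or cudaMemcpy
      (void)device_ptr;
    }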
cublasHandle_t at::cuda::getCurrentCUDABlasHandle ()
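Example (a hedged sketch, assuming float32, contiguous, row-major inputs that already live on the current CUDA device, with error checking omitted): reusing the handle ATen keeps for the current device and stream to call cublasSgemm directly, then optionally releasing cuBLAS workspace memory with clearCublasWorkspaces(). The function name sgemm_with_current_handle is illustrative.

    #include <ATen/ATen.h>
    #include <ATen/cuda/CUDAContext.h>
    #include <cublas_v2.h>

    // Illustrative: c = a.mm(b) via raw cuBLAS on PyTorch's current handle/stream.
    at::Tensor sgemm_with_current_handle(const at::Tensor& a, const at::Tensor& b) {
      auto m = a.size(0), k = a.size(1), n = b.size(1);
      auto c = at::empty({m, n}, a.options());

      cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
      const float alpha = 1.0f, beta = 0.0f;

      // cuBLAS is column-major; computing C^T = B^T * A^T leaves the
      // row-major result in c without an explicit transpose.
      cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                  static_cast<int>(n), static_cast<int>(m), static_cast<int>(k),
                  &alpha,
                  b.data_ptr<float>(), static_cast<int>(n),
                  a.data_ptr<float>(), static_cast<int>(k),
                  &beta,
                  c.data_ptr<float>(), static_cast<int>(n));

      // Optional: release cuBLAS workspace memory held by ATen.
      at::cuda::clearCublasWorkspaces();
      return c;
    }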
cusparseHandle_t at::cuda::getCurrentCUDASparseHandle ()
cudaDeviceProp * at::cuda::getCurrentDeviceProperties ()
cudaDeviceProp * at::cuda::getDeviceProperties (int64_t device)
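Example (a sketch; the cudaDeviceProp fields used below are the standard CUDA runtime ones): summarising every visible GPU, using device_count() rather than the deprecated getNumGPUs(). The helper name is illustrative.

    #include <ATen/cuda/CUDAContext.h>
    #include <cuda_runtime.h>
    #include <cstdio>

    // Illustrative: print name, compute capability, SM count and memory per GPU.
    void print_gpu_summary() {
      int64_t n = at::cuda::device_count();
      for (int64_t i = 0; i < n; ++i) {
        cudaDeviceProp* prop = at::cuda::getDeviceProperties(i);
        std::printf("GPU %lld: %s, SM %d.%d, %d multiprocessors, %.1f GiB\n",
                    (long long)i, prop->name, prop->major, prop->minor,
                    prop->multiProcessorCount,
                    prop->totalGlobalMem / (1024.0 * 1024.0 * 1024.0));
      }
    }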
int64_t at::cuda::getNumGPUs ()  [inline]

DEPRECATED: use device_count() instead.
bool at::cuda::is_available ()  [inline]

CUDA is available if we compiled with CUDA, and there are one or more devices.
If we compiled with CUDA but there is a driver problem, etc., this function will report that CUDA is not available (rather than raising an error).
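Example (a minimal sketch): using is_available() as a soft capability check so the same code runs on CUDA and CPU-only machines; the helper name and tensor shape are illustrative.

    #include <ATen/ATen.h>
    #include <ATen/cuda/CUDAContext.h>

    // Illustrative: fall back to CPU when CUDA is compiled in but unusable.
    at::Tensor ones_on_best_device() {
      auto device = at::cuda::is_available() ? at::kCUDA : at::kCPU;
      return at::ones({2, 3}, at::TensorOptions().device(device));
    }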
int at::cuda::warp_size ()
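Example (a sketch): querying the warp size at runtime instead of hard-coding 32 when sizing a launch configuration, since the value can differ on ROCm/HIP builds. The 256-thread cap and rounding policy are illustrative choices.

    #include <ATen/cuda/CUDAContext.h>
    #include <algorithm>
    #include <cstdint>

    // Illustrative: pick a thread-block size that is a whole number of warps.
    int threads_per_block_for(int64_t n) {
      const int warp = at::cuda::warp_size();
      const int cap = 256;  // arbitrary illustrative upper bound
      int threads = static_cast<int>(std::min<int64_t>(n, cap));
      threads = ((threads + warp - 1) / warp) * warp;  // round up to a full warp
      return std::max(threads, warp);
    }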