PyTorch
#include <cstdint>
#include <cuda_runtime_api.h>
#include <cusparse.h>
#include <cublas_v2.h>
#include <ATen/core/ATenGeneral.h>
#include <ATen/Context.h>
#include <c10/cuda/CUDAStream.h>
#include <c10/cuda/CUDAFunctions.h>
#include <ATen/cuda/Exceptions.h>
Namespaces
    namespace  at
    namespace  at::cuda
Functions
    int64_t            at::cuda::getNumGPUs ()
                           DEPRECATED: use device_count() instead.
    bool               at::cuda::is_available ()
                           CUDA is available if we compiled with CUDA, and there are one or more devices.
    cudaDeviceProp *   at::cuda::getCurrentDeviceProperties ()
    int                at::cuda::warp_size ()
    cudaDeviceProp *   at::cuda::getDeviceProperties (int64_t device)
    bool               at::cuda::canDeviceAccessPeer (int64_t device, int64_t peer_device)
    Allocator *        at::cuda::getCUDADeviceAllocator ()
    cusparseHandle_t   at::cuda::getCurrentCUDASparseHandle ()
    cublasHandle_t     at::cuda::getCurrentCUDABlasHandle ()
    void               at::cuda::clearCublasWorkspaces ()
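
The device-query functions above can be combined into a short program. The following is a minimal sketch, not part of this header: it assumes the header documented here is ATen/cuda/CUDAContext.h, a working libtorch/ATen build so that header is on the include path, and at least one visible CUDA device; the device indices are illustrative.

    #include <iostream>
    #include <ATen/cuda/CUDAContext.h>

    int main() {
      if (!at::cuda::is_available()) {
        std::cout << "CUDA is not available\n";
        return 0;
      }
      // Properties of the device that is current on this thread.
      cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
      std::cout << "device: " << prop->name
                << ", SMs: " << prop->multiProcessorCount
                << ", warp size: " << at::cuda::warp_size() << "\n";
      // Peer-access query between devices 0 and 1 (indices are illustrative).
      if (c10::cuda::device_count() > 1) {
        std::cout << "device 0 can access device 1: "
                  << at::cuda::canDeviceAccessPeer(0, 1) << "\n";
      }
      return 0;
    }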
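The handle getters return cuBLAS and cuSPARSE handles that ATen manages for the calling thread and associates with the current CUDA stream, so library calls can be issued without creating a handle by hand. Another hedged sketch under the same assumptions (ATen/cuda/CUDAContext.h include path, libtorch build); the cublasSscal call and the device pointer d_x are illustrative, not taken from this header.

    #include <ATen/cuda/CUDAContext.h>
    #include <cublas_v2.h>

    // Scales a device vector in place: x = alpha * x.
    void scale_on_current_stream(float* d_x, int n, float alpha) {
      // Handle already bound to the current device and stream by ATen.
      cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
      cublasSscal(handle, n, &alpha, d_x, 1);
    }

getCurrentCUDASparseHandle() plays the analogous role for cuSPARSE, and clearCublasWorkspaces() appears intended to release the workspace memory cached for the cuBLAS handles.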