CUDAException.cpp
#include <c10/cuda/CUDAException.h>

#include <c10/cuda/CUDADeviceAssertionHost.h>
#include <c10/util/Exception.h>
#include <cuda_runtime.h>

#include <string>

namespace c10 {
namespace cuda {

void c10_cuda_check_implementation(
    const int32_t err,
    const char* filename,
    const char* function_name,
    const int line_number,
    const bool include_device_assertions) {
  const auto cuda_error = static_cast<cudaError_t>(err);
  // Only consult the device-side assertion (DSA) registry when the caller asks
  // for it; the flag is disabled for checks issued while the DSA handlers
  // themselves are being initialized.
  const auto cuda_kernel_failure = include_device_assertions
      ? c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().has_failed()
      : false;

  // Fast path: no runtime error and no recorded kernel assertion failure.
  if (C10_LIKELY(cuda_error == cudaSuccess && !cuda_kernel_failure)) {
    return;
  }

  // cudaGetLastError() also resets the sticky error state, so later CUDA calls
  // are not poisoned by the error we are about to report.
  auto error_unused C10_UNUSED = cudaGetLastError();
  (void)error_unused;

  std::string check_message;
#ifndef STRIP_ERROR_MESSAGES
  check_message.append("CUDA error: ");
  check_message.append(cudaGetErrorString(cuda_error));
  check_message.append(c10::cuda::get_cuda_check_suffix());
  check_message.append("\n");
  if (include_device_assertions) {
    check_message.append(c10_retrieve_device_side_assertion_info());
  } else {
    check_message.append(
        "Device-side assertions were explicitly omitted for this error check; the error probably arose while initializing the DSA handlers.");
  }
#endif

  // Always throws: surface the CUDA failure as a c10::Error carrying the
  // assembled message (empty when STRIP_ERROR_MESSAGES is defined).
  TORCH_CHECK(false, check_message);
}

} // namespace cuda
} // namespace c10
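
// Usage sketch (illustrative, not part of the original file): callers are not
// expected to invoke c10_cuda_check_implementation() directly. A wrapper macro
// such as C10_CUDA_CHECK, declared in c10/cuda/CUDAException.h, captures the
// call site and forwards to it roughly as follows; the exact definition may
// differ between PyTorch versions, so treat this as an approximation:
//
//   #define C10_CUDA_CHECK(EXPR)                        \
//     do {                                              \
//       const cudaError_t __err = EXPR;                 \
//       c10::cuda::c10_cuda_check_implementation(       \
//           static_cast<int32_t>(__err),                \
//           __FILE__,                                   \
//           __func__,                                   \
//           __LINE__,                                   \
//           /*include_device_assertions=*/true);        \
//     } while (0)
//
// A typical call site then looks like:
//
//   C10_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, cudaMemcpyDefault, stream));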