#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
#define __CLANG_CUDA_RUNTIME_WRAPPER_H__

#if defined(__CUDA__) && defined(__clang__)
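// Standard library headers are pulled in up front, before any of the macro
// manipulation below, and the macros most likely to be clobbered by the CUDA
// headers are saved here so they can be restored at the end of this file.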
#include <cmath>
#include <cstdlib>
#include <stdlib.h>

#pragma push_macro("__THROW")
#pragma push_macro("__CUDA_ARCH__")
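// cuda.h is included first to get CUDA_VERSION, which drives the
// version-specific workarounds in the rest of this file.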
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
#elif CUDA_VERSION < 7000
#error "Unsupported CUDA version!"
#endif
#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
#if CUDA_VERSION >= 10000
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#endif
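// When compiling for the host, pretend to target an arbitrarily high compute
// capability so that the largest possible subset of device-side declarations
// below becomes visible.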
#ifndef __CUDA_ARCH__
#define __CUDA_ARCH__ 9999
#endif
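// Stub out declaration-only CUDA headers by pre-defining their include
// guards; their contents are either replaced by clang-provided headers or
// pulled in from the corresponding .hpp implementation files further down.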
#define __DEVICE_LAUNCH_PARAMETERS_H__
#define __DEVICE_FUNCTIONS_H__
#define __MATH_FUNCTIONS_H__
#define __COMMON_FUNCTIONS_H__
#define __DEVICE_FUNCTIONS_DECLS_H__
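// Host-side runtime headers. __CUDA_LIBDEVICE__ and nv_weak are only adjusted
// temporarily while cuda_runtime.h and its dependencies are being processed.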
#if CUDA_VERSION < 9000
#define __CUDA_LIBDEVICE__
#endif
#include "host_defines.h"
#include "driver_types.h"
#include "host_config.h"

#pragma push_macro("nv_weak")
#undef __CUDA_LIBDEVICE__
#include "cuda_runtime.h"

#pragma pop_macro("nv_weak")
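// CUDA headers call __nvvm_memcpy/__nvvm_memset, which clang does not provide
// as builtins; forward them to the regular memory builtins (the alignment
// argument is dropped).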
#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
#if CUDA_VERSION < 9000
#include "crt/device_runtime.h"
#endif
#include "crt/host_runtime.h"
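// device_runtime.h defines several __cxa_* helpers as macros, which would
// conflict with the C++ ABI declarations in cxxabi.h; undefine them.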
#undef __cxa_vec_cctor
#undef __cxa_vec_delete2
#undef __cxa_vec_delete
#undef __cxa_vec_delete3
#undef __cxa_pure_virtual
#ifdef __APPLE__
// math_functions.hpp expects a host-side __signbitd() on macOS, which ends up
// missing because of the preprocessor games above; provide it here.
inline __host__ double __signbitd(double x) {
  return std::signbit(x);
}
#endif
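// CUDA-9 removed the implementations of many device-side wrappers from its
// headers; clang supplies its own replacements.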
#if CUDA_VERSION >= 9000
#include "__clang_cuda_device_functions.h"
#endif
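// Newer CUDA-8 headers check the *values* of __USE_FAST_MATH__ and
// __CUDA_PREC_DIV rather than whether they are defined (CU_DEVICE_INVALID is
// used to detect such headers), so give them defined, precise-math defaults.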
#if defined(CU_DEVICE_INVALID)
#if !defined(__USE_FAST_MATH__)
#define __USE_FAST_MATH__ 0
#endif

#if !defined(__CUDA_PREC_DIV)
#define __CUDA_PREC_DIV 0
#endif
#endif
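// Poison __host__ while the implementation headers below are processed so
// none of them accidentally attaches a host attribute.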
#pragma push_macro("__host__")
#define __host__ UNEXPECTED_HOST_ATTRIBUTE
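// Anything marked __forceinline__ by the headers below becomes a device-side,
// always-inlined function.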
#pragma push_macro("__forceinline__")
#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
#if CUDA_VERSION < 9000
#include "device_functions.hpp"
#endif
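// math_functions.hpp consults __USE_FAST_MATH__ to pick between fast but
// approximate and slower but accurate implementations; honor clang's
// approximate-transcendentals mode here and restore the macro afterwards.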
#pragma push_macro("__USE_FAST_MATH__")
#if defined(__CLANG_GPU_APPROX_TRANSCENDENTALS__)
#define __USE_FAST_MATH__ 1
#endif

#if CUDA_VERSION >= 9000
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
#endif

#pragma pop_macro("__USE_FAST_MATH__")
#if CUDA_VERSION < 9000
#include "math_functions_dbl_ptx3.hpp"
#endif
#pragma pop_macro("__forceinline__")
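// Clear the include guard and re-include math_functions.hpp to pick up the
// overloads that were preprocessed away on the first pass.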
#undef __MATH_FUNCTIONS_HPP__
#if CUDA_VERSION < 9000
#include "math_functions.hpp"
#endif
#if CUDA_VERSION < 9000
// The single-argument variant of __brkpt() is not always declared by the CUDA
// headers, but device_functions.hpp included below needs it.
static inline __device__ void __brkpt(int __c) { __brkpt(); }
#endif
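// Now pull in the .hpp files with the actual definitions of the device
// functions. CUDA-9 moved most of them under crt/ and turned some atomics
// into compiler builtins, hence the version switches below.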
#if CUDA_VERSION >= 9000
// Some atomic functions became compiler builtins in CUDA-9, so we still need
// their declarations.
#include "device_atomic_functions.h"
#endif
#undef __DEVICE_FUNCTIONS_HPP__
#include "device_atomic_functions.hpp"
#if CUDA_VERSION >= 9000
#include "crt/device_functions.hpp"
#include "crt/device_double_functions.hpp"
#else
#include "device_functions.hpp"
#include "device_double_functions.h"
#endif
#include "sm_20_atomic_functions.hpp"
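// sm_20_intrinsics.hpp declares __isGlobal/__isShared/__isConstant/__isLocal
// in a form that conflicts with the implementations provided below, so rename
// its declarations out of the way while it is included.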
#pragma push_macro("__isGlobal")
#pragma push_macro("__isShared")
#pragma push_macro("__isConstant")
#pragma push_macro("__isLocal")
#define __isGlobal __ignored_cuda___isGlobal
#define __isShared __ignored_cuda___isShared
#define __isConstant __ignored_cuda___isConstant
#define __isLocal __ignored_cuda___isLocal
#include "sm_20_intrinsics.hpp"
#pragma pop_macro("__isGlobal")
#pragma pop_macro("__isShared")
#pragma pop_macro("__isConstant")
#pragma pop_macro("__isLocal")
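// Provide the address-space predicates ourselves on top of the NVVM
// intrinsics.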
#pragma push_macro("__DEVICE__")
#define __DEVICE__ static __device__ __forceinline__ __attribute__((const))
__DEVICE__ unsigned int __isGlobal(const void *p) {
  return __nvvm_isspacep_global(p);
}
__DEVICE__ unsigned int __isShared(const void *p) {
  return __nvvm_isspacep_shared(p);
}
__DEVICE__ unsigned int __isConstant(const void *p) {
  return __nvvm_isspacep_const(p);
}
__DEVICE__ unsigned int __isLocal(const void *p) {
  return __nvvm_isspacep_local(p);
}
#pragma pop_macro("__DEVICE__")
#include "sm_32_atomic_functions.hpp"
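// sm_60 atomics and sm_61 intrinsics appeared in CUDA-8; __CUDA_ARCH__ is
// saved and restored around these headers because their contents are guarded
// on the target architecture.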
#if CUDA_VERSION >= 8000
#pragma push_macro("__CUDA_ARCH__")
#include "sm_60_atomic_functions.hpp"
#include "sm_61_intrinsics.hpp"
#pragma pop_macro("__CUDA_ARCH__")
#endif
#undef __MATH_FUNCTIONS_HPP__
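// One more pass over math_functions.hpp, this time for host-side definitions.
// signbit is renamed and the __GNUC__ / _GLIBCXX_MATH_H / _LIBCPP_VERSION
// guards are temporarily overridden so the CUDA headers do not skip
// definitions when they detect libstdc++ or libc++ math headers.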
#pragma push_macro("signbit")
#pragma push_macro("__GNUC__")
#undef __GNUC__
#define signbit __ignored_cuda_signbit

#pragma push_macro("_GLIBCXX_MATH_H")
#pragma push_macro("_LIBCPP_VERSION")
#if CUDA_VERSION >= 9000
#undef _GLIBCXX_MATH_H
#ifdef _LIBCPP_VERSION
#define _LIBCPP_VERSION 3700
#endif
#endif

#if CUDA_VERSION >= 9000
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
#endif
#pragma pop_macro("_GLIBCXX_MATH_H")
#pragma pop_macro("_LIBCPP_VERSION")
#pragma pop_macro("__GNUC__")
#pragma pop_macro("signbit")
#pragma pop_macro("__host__")
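// Texture handling. With C++11 and CUDA-9+, clang handles texture fetches
// itself; otherwise fall back to CUDA's own headers, or emit a friendlier
// diagnostic when textures are used without C++11.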
#if __cplusplus >= 201103L && CUDA_VERSION >= 9000
#include "__clang_cuda_texture_intrinsics.h"
#else
#if CUDA_VERSION >= 9000
// Textures in CUDA-9+ are not usable without C++11. Attempting to use them
// with an older standard produces a cryptic error, so give a more helpful
// diagnostic instead.
template <typename T> struct __nv_tex_needs_cxx11 {
  const static bool value = false;
};
template <class T>
__host__ __device__ void __nv_tex_surf_handler(const char *__op, T *__ptr,
                                               cudaTextureObject_t obj,
                                               float __x) {
  _Static_assert(__nv_tex_needs_cxx11<T>::value,
                 "Texture support requires C++11");
}
#else
#include "texture_fetch_functions.h"
#include "texture_indirect_functions.h"
#endif // CUDA_VERSION >= 9000
#endif // __cplusplus >= 201103L && CUDA_VERSION >= 9000

#pragma pop_macro("__CUDA_ARCH__")
#pragma pop_macro("__THROW")
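// Device-side system-call declarations. The CUDA runtime declares these as
// host-only, but device code needs them for printf/malloc/free and assert()
// to work.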
extern "C" {
__device__ int vprintf(const char *, const char *);
__device__ void free(void *) __attribute((nothrow));
__device__ void *malloc(size_t) __attribute((nothrow));

__device__ void __assertfail(const char *__message, const char *__file,
                             unsigned __line, const char *__function,
                             size_t __charSize);

// The standard assert() macro on Linux expands to __assert_fail(), so provide
// a device-side implementation that forwards to __assertfail().
__device__ static inline void __assert_fail(const char *__message,
                                             const char *__file,
                                             unsigned __line,
                                             const char *__function) {
  __assertfail(__message, __file, __line, __function, sizeof(char));
}
} // extern "C"

// Device-side std::malloc and std::free simply forward to the global versions.
namespace std {
__device__ static inline void free(void *__ptr) { ::free(__ptr); }
__device__ static inline void *malloc(size_t __size) {
  return ::malloc(__size);
}
} // namespace std
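// Out-of-line definitions of the conversion operators declared in
// __clang_cuda_builtin_vars.h; they have to come after dim3 has been defined.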
__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
  return dim3(x, y, z);
}
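// curand_mtgp32_kernel.h redeclares blockDim and threadIdx; mapping dim3 and
// uint3 to clang's builtin-variable types while it is included keeps those
// declarations compatible with the builtins, then the names are restored.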
#pragma push_macro("dim3")
#pragma push_macro("uint3")
#define dim3 __cuda_builtin_blockDim_t
#define uint3 __cuda_builtin_threadIdx_t
#include "curand_mtgp32_kernel.h"
#pragma pop_macro("dim3")
#pragma pop_macro("uint3")
#pragma pop_macro("__USE_FAST_MATH__")
#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
#if CUDA_VERSION >= 9020
// The CUDA runtime uses this undocumented function to access kernel launch
// configuration; provide our own declaration for it here.
extern "C" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,
                                                size_t sharedMem = 0,
                                                void *stream = 0)
    __attribute__((device));
#endif // CUDA_VERSION >= 9020

#endif // __CUDA__ && __clang__
#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__