SPIR.cpp
//===- SPIR.cpp -----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// Base ABI and target codegen info implementation common between SPIR and
// SPIR-V.
//===----------------------------------------------------------------------===//

namespace {
class CommonSPIRABIInfo : public DefaultABIInfo {
public:
  CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  void setCCs();
};

class SPIRVABIInfo : public CommonSPIRABIInfo {
public:
  SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
};
} // end anonymous namespace
namespace {
class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
  CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
      : TargetCodeGenInfo(std::move(ABIInfo)) {}

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }

  unsigned getOpenCLKernelCallingConv() const override;
  llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
  llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *Ty) const override;
  llvm::Type *getSPIRVImageTypeFromHLSLResource(
      const HLSLAttributedResourceType::Attributes &attributes,
      llvm::Type *ElementType, llvm::LLVMContext &Ctx) const;
};
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
  SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
};

inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    return "singlethread";
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    return "subgroup";
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    return "workgroup";
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    return "device";
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    return "";
  }
  return "";
}
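// Illustrative note (a hedged sketch, not from the original file): the strings
// returned above become LLVM syncscope names on atomic instructions, so an
// atomic at OpenCL work-group scope would be expected to lower to IR roughly
// like:
//   %old = atomicrmw add ptr addrspace(1) %p, i32 1 syncscope("workgroup") seq_cst
// The empty string returned for the system/all-SVM-devices scopes selects the
// default (system) sync scope, i.e. no syncscope() annotation at all.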
} // End anonymous namespace.

void CommonSPIRABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}

ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyReturnType(RetTy);
  if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
    return DefaultABIInfo::classifyReturnType(RetTy);

  if (const RecordType *RT = RetTy->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return DefaultABIInfo::classifyReturnType(RetTy);
  }

  // TODO: The AMDGPU ABI is non-trivial to represent in SPIR-V; in order to
  // avoid encoding various architecture specific bits here we return everything
  // as direct to retain type info for things like aggregates, for later perusal
  // when translating back to LLVM/lowering in the BE. This is also why we
  // disable flattening as the outcomes can mismatch between SPIR-V and AMDGPU.
  // This will be revisited / optimised in the future.
  return ABIArgInfo::getDirect(CGT.ConvertType(RetTy), 0u, nullptr, false);
}

ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  if (getContext().getLangOpts().CUDAIsDevice) {
    // Coerce pointer arguments with default address space to CrossWorkGroup
    // pointers for HIPSPV/CUDASPV. When the language mode is HIP/CUDA, the
    // SPIRTargetInfo maps cuda_device to SPIR-V's CrossWorkGroup address space.
    llvm::Type *LTy = CGT.ConvertType(Ty);
    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
    if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
      LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
      return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
    }

    if (isAggregateTypeForABI(Ty)) {
      if (getTarget().getTriple().getVendor() == llvm::Triple::AMD)
        // TODO: The AMDGPU kernel ABI passes aggregates byref, which is not
        // currently expressible in SPIR-V; SPIR-V passes aggregates byval,
        // which the AMDGPU kernel ABI does not allow. Passing aggregates as
        // direct works around this impedance mismatch, as it retains type info
        // and can be correctly handled, post reverse-translation, by the AMDGPU
        // BE, which has to support this CC for legacy OpenCL purposes. It can
        // be brittle and does lead to performance degradation in certain
        // pathological cases. This will be revisited / optimised in the future,
        // once a way to deal with the byref/byval impedance mismatch is
        // identified.
        return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
      // Force copying aggregate type in kernel arguments by value when
      // compiling CUDA targeting SPIR-V. This is required for the object
      // copied to be valid on the device.
      // This behavior follows the CUDA spec
      // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
      // and matches the NVPTX implementation.
      return getNaturalAlignIndirect(Ty, /* byval */ true);
    }
  }
  return classifyArgumentType(Ty);
}
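// Illustrative note (a hedged sketch; the kernel parameter below is a
// hypothetical example): under HIP/CUDA device compilation the default address
// space typically lowers to SPIR-V's generic address space and cuda_device to
// CrossWorkGroup, so a kernel parameter such as `int *p` would be coerced from
// `ptr addrspace(4)` to `ptr addrspace(1)`, while an aggregate parameter is
// passed byval unless the target vendor is AMD, in which case it stays direct.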

ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
  if (getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return DefaultABIInfo::classifyArgumentType(Ty);
  if (!isAggregateTypeForABI(Ty))
    return DefaultABIInfo::classifyArgumentType(Ty);

  // Records with non-trivial destructors/copy-constructors should not be
  // passed by value.
  if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return DefaultABIInfo::classifyArgumentType(Ty);
  }

  return ABIArgInfo::getDirect(CGT.ConvertType(Ty), 0u, nullptr, false);
}

void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // The logic is the same as in DefaultABIInfo, with an exception for the
  // handling of kernel arguments.
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments()) {
    if (CC == llvm::CallingConv::SPIR_KERNEL) {
      I.info = classifyKernelArgumentType(I.type);
    } else {
      I.info = classifyArgumentType(I.type);
    }
  }
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  if (CGM.getTarget().getTriple().isSPIRV())
    SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
  else
    CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
}
}
}

unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  // Convert HIP kernels to SPIR-V kernels.
  if (getABIInfo().getContext().getLangOpts().HIP) {
    FT = getABIInfo().getContext().adjustFunctionType(
        FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
    return;
  }
}

LangAS
SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                 const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  // If we're here it means that we're using the SPIRDefIsGen ASMap, hence for
  // the global AS we can rely on either cuda_device or sycl_global to be
  // correct; however, since this is not a CUDA Device context, we use
  // sycl_global to prevent confusion with the assertion.
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  return DefaultGlobalAS;
}
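// Illustrative note (a hedged sketch, not from the original file): for an
// address-space-agnostic language such as SYCL, a namespace-scope variable
// declared without an explicit address space qualifier would therefore be
// emitted in the sycl_global (CrossWorkGroup) address space selected above.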

void SPIRVTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (!M.getLangOpts().HIP ||
      M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return;
  if (GV->isDeclaration())
    return;

  auto F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  auto FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;
  if (!FD->hasAttr<CUDAGlobalAttr>())
    return;

  unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
  if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
    N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();

  // We encode the maximum flat WG size in the first component of the 3D
  // max_work_group_size attribute, which will get reverse translated into the
  // original AMDGPU attribute when targeting AMDGPU.
  auto Int32Ty = llvm::IntegerType::getInt32Ty(M.getLLVMContext());
  llvm::Metadata *AttrMDArgs[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};

  F->setMetadata("max_work_group_size",
                 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
}
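// Illustrative note (a hedged sketch; the kernel name is hypothetical): for a
// HIP kernel compiled with the default GPUMaxThreadsPerBlock of 1024 and no
// explicit flat-work-group-size attribute, the emitted function would be
// expected to carry metadata roughly like:
//   define spir_kernel void @k(...) !max_work_group_size !0 { ... }
//   !0 = !{i32 1024, i32 1, i32 1}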

llvm::SyncScope::ID
SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
                                           llvm::AtomicOrdering,
                                           llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
}

/// Construct a SPIR-V target extension type for the given OpenCL image type.
static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
                                     StringRef OpenCLName,
                                     unsigned AccessQualifier) {
  // These parameters correspond to the operands of OpTypeImage (see
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
  // for more details). The first 6 integer parameters all default to 0, and
  // will be changed to 1 only for the image type(s) that set the parameter to
  // one. The 7th integer parameter is the access qualifier, which is tacked on
  // at the end.
  SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};

  // Choose the dimension of the image--this corresponds to the Dim enum in
  // SPIR-V (first integer parameter of OpTypeImage).
  if (OpenCLName.starts_with("image2d"))
    IntParams[0] = 1; // 2D
  else if (OpenCLName.starts_with("image3d"))
    IntParams[0] = 2; // 3D
  else if (OpenCLName == "image1d_buffer")
    IntParams[0] = 5; // Buffer
  else
    assert(OpenCLName.starts_with("image1d") && "Unknown image type");

  // Set the other integer parameters of OpTypeImage if necessary. Note that the
  // OpenCL image types don't provide any information for the Sampled or
  // Image Format parameters.
  if (OpenCLName.contains("_depth"))
    IntParams[1] = 1;
  if (OpenCLName.contains("_array"))
    IntParams[2] = 1;
  if (OpenCLName.contains("_msaa"))
    IntParams[3] = 1;

  // Access qualifier
  IntParams.push_back(AccessQualifier);

  return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
                                  IntParams);
}
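// Illustrative note (a hedged sketch, not from the original file): with the
// encoding above, a read_only image2d_t would be expected to map to
//   target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)
// and a write_only image2d_array_t to
//   target("spirv.Image", void, 1, 0, 1, 0, 0, 0, 1)
// where the last operand is the access qualifier appended at the end.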

llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
                                                       const Type *Ty) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
    enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
    switch (BuiltinTy->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
#define INTEL_SUBGROUP_AVC_TYPE(Name, Id)                                      \
  case BuiltinType::OCLIntelSubgroupAVC##Id:                                   \
    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
#include "clang/Basic/OpenCLExtensionTypes.def"
    default:
      return nullptr;
    }
  }

  return nullptr;
}
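// Illustrative note (a hedged sketch, not from the original file): with the
// mapping above, sampler_t lowers to target("spirv.Sampler") and a read_only
// pipe lowers to target("spirv.Pipe", 0); the single integer operand is 1
// only for write-only pipes.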

llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(CodeGenModule &CGM,
                                                     const Type *Ty) const {
  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
  if (!ResType)
    return nullptr;

  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
  switch (ResAttrs.ResourceClass) {
  case llvm::dxil::ResourceClass::UAV:
  case llvm::dxil::ResourceClass::SRV: {
    // TypedBuffer and RawBuffer both need an element type.
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull())
      return nullptr;

    assert(!ResAttrs.RawBuffer &&
           "Raw buffer handles are not implemented for SPIR-V yet");
    assert(!ResAttrs.IsROV &&
           "Rasterizer order views not implemented for SPIR-V yet");

    // Convert the element type.
    llvm::Type *ElemType = CGM.getTypes().ConvertType(ContainedTy);
    return getSPIRVImageTypeFromHLSLResource(ResAttrs, ElemType, Ctx);
  }
  case llvm::dxil::ResourceClass::CBuffer:
    llvm_unreachable("CBuffer handles are not implemented for SPIR-V yet");
    break;
  case llvm::dxil::ResourceClass::Sampler:
    return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
  }
  return nullptr;
}

llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
    const HLSLAttributedResourceType::Attributes &attributes,
    llvm::Type *ElementType, llvm::LLVMContext &Ctx) const {

  if (ElementType->isVectorTy())
    ElementType = ElementType->getScalarType();

  assert((ElementType->isIntegerTy() || ElementType->isFloatingPointTy()) &&
         "The element type for a SPIR-V resource must be a scalar integer or "
         "floating point type.");

  // These parameters correspond to the operands to the OpTypeImage SPIR-V
  // instruction. See
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage.
  SmallVector<unsigned, 6> IntParams(6, 0);

  // Dim
  // For now we assume everything is a buffer.
  IntParams[0] = 5;

  // Depth
  // HLSL does not indicate if it is a depth texture or not, so we use unknown.
  IntParams[1] = 2;

  // Arrayed
  IntParams[2] = 0;

  // MS
  IntParams[3] = 0;

  // Sampled
  IntParams[4] =
      attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;

  // Image format.
  // Setting to unknown for now.
  IntParams[5] = 0;

  return llvm::TargetExtType::get(Ctx, "spirv.Image", {ElementType}, IntParams);
}
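// Illustrative note (a hedged sketch; the HLSL resource below is a
// hypothetical example): an HLSL RWBuffer<float4>, i.e. a UAV typed buffer
// with a vector element type, would be expected to lower to
//   target("spirv.Image", float, 5, 2, 0, 0, 2, 0)
// a buffer-dimensioned image with unknown depth and format and Sampled = 2
// (read/write without a sampler).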

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
}