#include "TargetInfo.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
static const unsigned MaxNumRegsForArgsRet = 16;
unsigned numRegsForType(QualType Ty) const;
bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                       uint64_t Members) const override;
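// Coerce a pointer-typed kernel argument from address space FromAS to ToAS.
// Under HIP this is used below to turn generic (default) pointers into
// device/global pointers for kernel arguments.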
llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                     unsigned ToAS) const {
  auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
  if (PtrTy && PtrTy->getAddressSpace() == FromAS)
    return llvm::PointerType::get(Ty->getContext(), ToAS);
  return Ty;
}
ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
                                unsigned &NumRegsLeft) const;
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}
bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}
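// Estimate how many 32-bit registers a value of type Ty occupies when it is
// passed or returned in registers.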
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors are passed packed, two elements per register.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    for (const FieldDecl *Field : RT->getDecl()->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }
    return NumRegs;
  }

  // Scalars: round the bit size up to whole 32-bit registers.
  return (getContext().getTypeSize(Ty) + 31) / 32;
}
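// computeInfo: AMDGPU_KERNEL arguments use the kernel classification; all
// other arguments share a budget of MaxNumRegsForArgsRet registers tracked in
// NumRegsLeft.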
  unsigned ArgumentIndex = 0;
  const unsigned numFixedArguments = FI.getNumRequiredArgs();

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
      Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
    }
  }
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
  llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());

  if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
    return ABIArgInfo::getDirect();
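// Kernel arguments: under HIP, coerce generic pointers in the argument type to
// device (global) pointers; aggregates that did not need coercion are passed
// byref in the constant address space.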
  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

  if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
      isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant));
  }
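// Non-kernel arguments: NumRegsLeft tracks how many of the argument/return
// registers remain; aggregates that do not fit are passed by reference in the
// private address space.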
ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
  unsigned NumRegs = (Size + 31) / 32;
  NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
  llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());

  if (NumRegsLeft > 0) {
    unsigned NumRegs = numRegsForType(Ty);
    if (NumRegsLeft >= NumRegs) {
      NumRegsLeft -= NumRegs;
      return ABIArgInfo::getDirect();
    }
  }

  // Use pass-by-reference instead of pass-by-value for struct arguments in
  // the function ABI.
  return ABIArgInfo::getIndirectAliased(
      getContext().getTypeAlignInChars(Ty),
      getContext().getTargetAddressSpace(LangAS::opencl_private));

  // Otherwise (default classification), still account for the registers the
  // argument will consume.
  unsigned NumRegs = numRegsForType(Ty);
  NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                               CodeGenModule &M) const;
llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                               llvm::PointerType *T,
                               QualType QT) const override;
LangAS getASTAllocaAddressSpace() const override {
  return getLangASFromTargetAS(
      getABIInfo().getDataLayout().getAllocaAddrSpace());
}
llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                       SyncScope Scope,
                                       llvm::AtomicOrdering Ordering,
                                       llvm::LLVMContext &Ctx) const override;
void setTargetAtomicMetadata(CodeGenFunction &CGF,
                             llvm::Instruction &AtomicInst,
                             const AtomicExpr *Expr = nullptr) const override;
llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                       llvm::Function *BlockInvokeFunc,
                                       llvm::Type *BlockTy) const override;
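// Decide whether a declaration that would otherwise be emitted with hidden
// visibility needs to be promoted to protected visibility (kernels and
// CUDA/HIP device builtin surface/texture variables).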
static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
         ((isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
          (isa<VarDecl>(D) &&
           (cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
}
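// Translate AMDGPU-related declaration attributes (reqd_work_group_size,
// amdgpu_flat_work_group_size, amdgpu_waves_per_eu, amdgpu_num_sgpr/vgpr,
// amdgpu_max_num_work_groups) into the corresponding "amdgpu-*" function
// attributes in IR.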
void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const bool IsOpenCLKernel =
      M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum work-group size for OpenCL kernels and
    // HIP kernels that carry no explicit attribute.
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }
  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
    M.handleAMDGPUWavesPerEUAttr(F, Attr);

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
    uint32_t X = Attr->getMaxNumWorkGroupsX()
                     ->EvaluateKnownConstInt(M.getContext())
                     .getExtValue();
    // Y and Z dimensions default to 1 if not specified.
    uint32_t Y = Attr->getMaxNumWorkGroupsY()
                     ? Attr->getMaxNumWorkGroupsY()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;
    uint32_t Z = Attr->getMaxNumWorkGroupsZ()
                     ? Attr->getMaxNumWorkGroupsZ()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;

    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
  }
}
void AMDGPUTargetCodeGenInfo::emitTargetGlobals(
    CodeGen::CodeGenModule &CGM) const {
  StringRef Name = "__oclc_ABI_version";
  llvm::GlobalVariable *OriginalGV = CGM.getModule().getNamedGlobal(Name);
  if (OriginalGV &&
      !llvm::GlobalVariable::isExternalLinkage(OriginalGV->getLinkage()))
    return;

  if (CGM.getTarget().getTargetOpts().CodeObjectVersion ==
      llvm::CodeObjectVersionKind::COV_None)
    return;

  auto *Type = llvm::IntegerType::getIntNTy(CGM.getModule().getContext(), 32);
  llvm::Constant *COV = llvm::ConstantInt::get(
      Type, CGM.getTarget().getTargetOpts().CodeObjectVersion);

  // Create a hidden weak_odr constant definition holding the version.
  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Type, true, llvm::GlobalValue::WeakODRLinkage, COV, Name,
      nullptr, llvm::GlobalValue::ThreadLocalMode::NotThreadLocal,
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));

  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Local);
  GV->setVisibility(llvm::GlobalValue::VisibilityTypes::HiddenVisibility);

  // Replace any prior external declaration with the new definition.
  if (OriginalGV) {
    OriginalGV->replaceAllUsesWith(GV);
    GV->takeName(OriginalGV);
    OriginalGV->eraseFromParent();
  }
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD)
    setFunctionDeclAttributes(FD, F, M);

  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}
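// On AMDGPU the bit pattern of a null pointer depends on the address space:
// when the target's null value for the requested type is not 0, emit the null
// of the generic address space and addrspacecast it to the requested type.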
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  auto &Ctx = CGM.getContext();
  if (Ctx.getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  // Only promote to the constant address space when the variable has constant
  // storage and constant initialization.
  if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
      D->hasConstantInitialization()) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return *ConstAS;
  }
  return DefaultGlobalAS;
}
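// Map Clang/HIP/OpenCL synchronization scopes onto AMDGPU sync-scope names
// ("singlethread", "wavefront", "workgroup", "agent", or "" for system), and
// append the "one-as" suffix for certain orderings weaker than sequentially
// consistent.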
llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    Name = "singlethread";
    break;
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    Name = "wavefront";
    break;
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    Name = "workgroup";
    break;
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    Name = "agent";
    break;
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  }

  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();

    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
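// Attach AMDGPU-specific metadata to atomic instructions: mark flat atomics
// that cannot address private memory with !noalias.addrspace, and tag
// floating-point atomicrmw operations with "amdgpu.no.fine.grained.memory"
// (and, for float fadd, "amdgpu.ignore.denormal.mode").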
void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
    CodeGenFunction &CGF, llvm::Instruction &AtomicInst,
    const AtomicExpr *AE) const {
  auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
  auto *CmpX = dyn_cast<llvm::AtomicCmpXchgInst>(&AtomicInst);

  // OpenCL and old-style HIP atomics consider atomics targeting thread-private
  // memory to be undefined.
  if (((RMW && RMW->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS) ||
       (CmpX &&
        CmpX->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS)) &&
      AE && AE->threadPrivateMemoryAtomicsAreUndefined()) {
    llvm::MDBuilder MDHelper(CGF.getLLVMContext());
    llvm::MDNode *ASRange = MDHelper.createRange(
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS),
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS + 1));
    AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);
  }

  if (!RMW)
    return;

  llvm::AtomicRMWInst::BinOp RMWOp = RMW->getOperation();
  if (llvm::AtomicRMWInst::isFPOperation(RMWOp)) {
    llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
    RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);

    if (RMWOp == llvm::AtomicRMWInst::FAdd && RMW->getType()->isFloatTy())
      RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
  }
}
bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
  return true;
}
void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}
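// Create an AMDGPU_KERNEL wrapper for an enqueued block: the block literal is
// passed by value as the first kernel argument, copied into an alloca, and a
// pointer to that copy is forwarded to the block invoke function. The wrapper
// also carries the "enqueued-block" attribute and OpenCL kernel-argument
// metadata.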
llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals, AccessQuals,
      ArgTypeNames, ArgBaseTypeNames, ArgTypeQuals, ArgNames;
  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);

  llvm::AttrBuilder KernelAttrs(C);
  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
  KernelAttrs.addAttribute("enqueued-block");
  F->addFnAttrs(KernelAttrs);
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
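// Compute the "amdgpu-flat-work-group-size" range from the
// amdgpu_flat_work_group_size and reqd_work_group_size attributes; when a
// reqd_work_group_size is the only constraint, min and max are both the
// product of its dimensions.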
void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  unsigned Min = 0;
  unsigned Max = 0;
  if (FlatWGS) {
    Min = FlatWGS->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
    Max = FlatWGS->getMax()->EvaluateKnownConstInt(getContext()).getExtValue();
  }
  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

  if (Min != 0) {
    assert(Min <= Max && "Min must be less than or equal Max");

    if (MinThreadsVal)
      *MinThreadsVal = Min;
    if (MaxThreadsVal)
      *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
    if (F)
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
  unsigned Min =
      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
  unsigned Max =
      Attr->getMax()
          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
          : 0;

  if (Min != 0) {
    assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

    std::string AttrVal = llvm::utostr(Min);
    if (Max != 0)
      AttrVal = AttrVal + "," + llvm::utostr(Max);
    F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}