#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace CodeGen;
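// The switch fragment below maps Clang calling-convention enumerators onto
// their LLVM counterparts; it appears to be part of
// ClangCallConvToLLVMCallConv (attribution assumed, the enclosing function
// signature and the remaining cases are elided in this excerpt).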
  default: return llvm::CallingConv::C;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
                                        unsigned totalArgs) {
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);
  paramInfos.reserve(totalArgs);
  paramInfos.resize(prefixArgs);
    paramInfos.push_back(ParamInfo);
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  paramInfos.resize(totalArgs);
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
  unsigned PrefixSize = prefix.size();
  prefix.reserve(prefix.size() + FPT->getNumParams());
  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
                                     FTP->getExtInfo(), paramInfos, Required);
  return ::arrangeLLVMFunctionInfo(*this, false, argTypes,
  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
  if (D->hasAttr<AArch64VectorPcsAttr>())
  if (D->hasAttr<AArch64SVEPcsAttr>())
  if (D->hasAttr<AMDGPUKernelCallAttr>())
  if (D->hasAttr<RISCVVectorCCAttr>())
  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
  if (FD->hasAttr<CUDAGlobalAttr>()) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
      !Target.getCXXABI().hasConstructorVariants();
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  bool PassParams = true;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    if (auto Inherited = CD->getInheritedConstructor())
  if (!paramInfos.empty()) {
    paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
    paramInfos.append(AddedArgs.Suffix,
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                 argTypes, extInfo, paramInfos, required);
  for (auto &arg : args)
  for (auto &arg : args)
                                        unsigned prefixArgs, unsigned totalArgs) {
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  for (const auto &Arg : args)
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
          FPT, TotalPrefixArgs + ExtraSuffixArgs)
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
      ArgTypes, Info, ParamInfos, Required);
  if (MD->isImplicitObjectMemberFunction())
  assert(isa<FunctionType>(FTy));
      std::nullopt, noProto->getExtInfo(), {},
                                            I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
  assert(MD->isVirtual() && "only methods have thunks");
  ArgTys.push_back(*FTP->param_type_begin());
    ArgTys.push_back(Context.IntTy);
                                          unsigned numExtraRequiredArgs,
  assert(args.size() >= numExtraRequiredArgs);
    if (proto->isVariadic())
    if (proto->hasExtParameterInfos())
                     cast<FunctionNoProtoType>(fnType))) {
  for (const auto &arg : args)
                                     paramInfos, required);
                     chainCall ? 1 : 0, chainCall);
  for (const auto &Arg : args)
                                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
                                     paramInfos, required);
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  assert(llvm::all_of(argTypes,
  llvm::FoldingSetNodeID ID;
  bool isDelegateCall =
                          info, paramInfos, required, resultType, argTypes);
  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
                              info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);
  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  assert(inserted && "Recursively being processed?");
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");
                                       bool chainCall, bool delegateCall,
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;
  FI->Required = required;
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
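// The TypeExpansion class hierarchy below drives the "Expand" ABI kind:
// constant arrays, records, and complex numbers are broken into their
// constituent scalar pieces so each piece can be passed as a separate IR
// argument.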
struct TypeExpansion {
  enum TypeExpansionKind {
  const TypeExpansionKind Kind;
  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}

struct ConstantArrayExpansion : TypeExpansion {
  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;

struct RecordExpansion : TypeExpansion {
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;

struct ComplexExpansion : TypeExpansion {
  static bool classof(const TypeExpansion *TE) {

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
static std::unique_ptr<TypeExpansion>
    return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
           "Cannot expand structure with flexible array.");
      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
        Fields.push_back(LargestFD);
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
    return std::make_unique<RecordExpansion>(std::move(Bases),
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  return std::make_unique<NoExpansion>();
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
    for (auto FD : RExp->Fields)
  if (isa<ComplexExpansion>(Exp.get()))
  assert(isa<NoExpansion>(Exp.get()));
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
    for (auto FD : RExp->Fields)
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    assert(isa<NoExpansion>(Exp.get()));
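// ExpandTypeFromArgs and ExpandTypeToArgs (fragments below) walk a
// TypeExpansion and move each expanded piece between the Clang-level
// LValue/RValue and the corresponding run of IR function arguments.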
                                         ConstantArrayExpansion *CAE,
                                         llvm::function_ref<void(Address)> Fn) {
  for (int i = 0, n = CAE->NumElts; i < n; i++) {
                                          llvm::Function::arg_iterator &AI) {
         "Unexpected non-simple lvalue during struct expansion.");
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    for (auto FD : RExp->Fields) {
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
  assert(isa<NoExpansion>(Exp.get()));
  llvm::Value *Arg = &*AI++;
  if (Arg->getType()->isPointerTy()) {

void CodeGenFunction::ExpandTypeToArgs(
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
        *this, CAExp, Addr, [&](Address EltAddr) {
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
    for (auto FD : RExp->Fields) {
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
  } else if (isa<ComplexExpansion>(Exp.get())) {
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
    IRCallArgs[IRCallArgPos++] = V;
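// The helpers below implement argument/return-value coercion: loading and
// storing a value through an IR type other than its natural one when the
// ABI requires it. In the full file these are likely the
// EnterStructPointerForCoercedAccess, CoerceIntOrPtrToIntOrPtr, and
// CreateCoercedLoad/CreateCoercedStore helpers; the signatures are elided
// here, so that attribution is an assumption.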
                                      const Twine &Name = "tmp") {
                                      llvm::StructType *SrcSTy,
  if (SrcSTy->getNumElements() == 0) return SrcPtr;
  uint64_t FirstEltSize =
  if (FirstEltSize < DstSize &&
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
  if (Val->getType() == Ty)
  if (isa<llvm::PointerType>(Val->getType())) {
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
  if (Val->getType() != DestIntTy) {
    if (DL.isBigEndian()) {
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
                                             DstSize.getFixedValue(), CGF);
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
  if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
          ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
          FixedSrcTy->getElementType()->isIntegerTy(8)) {
        ScalableDstTy = llvm::ScalableVectorType::get(
            FixedSrcTy->getElementType(),
            ScalableDstTy->getElementCount().getKnownMinValue() / 8);
      if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
        auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
            ScalableDstTy, UndefVec, Load, Zero, "cast.scalable");
        if (ScalableDstTy != Ty)
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
                                       llvm::TypeSize DstSize,
                                       bool DstIsVolatile) {
  llvm::Type *SrcTy = Src->getType();
  if (llvm::StructType *DstSTy =
    assert(!SrcSize.isScalable());
                                     SrcSize.getFixedValue(), *this);
  if (SrcSize.isScalable() || SrcSize <= DstSize) {
    if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&
  } else if (llvm::StructType *STy =
                 dyn_cast<llvm::StructType>(Src->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *Elt = Builder.CreateExtractValue(Src, i);
  } else if (SrcTy->isIntegerTy()) {
    llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
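// ClangToLLVMArgMapping records how each Clang-level argument of a
// CGFunctionInfo maps onto the flattened list of LLVM IR arguments: the
// inalloca slot, the sret slot, padding arguments, and a first-index/count
// pair per Clang argument.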
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned TotalIRArgs;
    unsigned PaddingArgIndex;
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;

  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);

                 bool OnlyRequiredArgs);

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
    auto &IRArgs = ArgInfo[ArgNo];
      IRArgs.PaddingArgIndex = IRArgNo++;
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
        IRArgs.NumberOfArgs = STy->getNumElements();
        IRArgs.NumberOfArgs = 1;
      IRArgs.NumberOfArgs = 1;
      IRArgs.NumberOfArgs = 0;
    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    if (IRArgNo == 1 && SwapThisWithSRet)
  assert(ArgNo == ArgInfo.size());
    InallocaArgNo = IRArgNo++;
  TotalIRArgs = IRArgNo;

  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
  switch (BT->getKind()) {
  case BuiltinType::Float:
  case BuiltinType::Double:
  case BuiltinType::LongDouble:
    if (BT->getKind() == BuiltinType::LongDouble)
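// The fragment below appears to come from GetFunctionType (assumption: the
// function name is elided here), which turns a CGFunctionInfo into the final
// llvm::FunctionType, using ClangToLLVMArgMapping to place sret, inalloca,
// padding, expanded, and coerced arguments at the right IR positions.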
  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  assert(Inserted && "Recursively being processed?");
  llvm::Type *resultType = nullptr;
    llvm_unreachable("Invalid ABI kind for return argument");
    resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  if (IRFunctionArgs.hasSRetArg()) {
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
  if (IRFunctionArgs.hasInallocaArg())
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
  for (; it != ie; ++it, ++ArgNo) {
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      assert(NumIRArgs == 0);
      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(
      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
        *ArgTypesIter++ = EltTy;
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");
  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
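// The next group of fragments builds default function attributes: AArch64
// SME/ZA state markers, OpenMP assumption strings, denormal-fp-math modes,
// and the large set of option-driven attributes added by
// getTrivialDefaultFunctionAttributes.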
                                          llvm::AttrBuilder &FuncAttrs,
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");
    FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");
    FuncAttrs.addAttribute("aarch64_preserves_za");
    FuncAttrs.addAttribute("aarch64_in_za");
    FuncAttrs.addAttribute("aarch64_out_za");
    FuncAttrs.addAttribute("aarch64_inout_za");
    FuncAttrs.addAttribute("aarch64_preserves_zt0");
    FuncAttrs.addAttribute("aarch64_in_zt0");
    FuncAttrs.addAttribute("aarch64_out_zt0");
    FuncAttrs.addAttribute("aarch64_inout_zt0");
                                     const Decl *Callee) {
  for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
    AA->getAssumption().split(Attrs, ",");
  FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                         llvm::join(Attrs.begin(), Attrs.end(), ","));
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return ClassDecl->hasTrivialDestructor();
                                const Decl *TargetDecl) {
  if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
  if (!Module.getLangOpts().CPlusPlus)
  if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
    if (FDecl->isExternC())
  } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
    if (VDecl->isExternC())
  return Module.getCodeGenOpts().StrictReturn ||
         !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
         Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
                                 llvm::DenormalMode FP32DenormalMode,
                                 llvm::AttrBuilder &FuncAttrs) {
  if (FPDenormalMode != llvm::DenormalMode::getDefault())
    FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
  if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
    FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
                                          llvm::AttrBuilder &FuncAttrs) {
    StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
    llvm::AttrBuilder &FuncAttrs) {
  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
    switch (CodeGenOpts.getFramePointer()) {
      FuncAttrs.addAttribute("frame-pointer",
                                 CodeGenOpts.getFramePointer()));
    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");
    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
      FuncAttrs.addAttribute("no-trapping-math", "true");
    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.ApproxFunc)
      FuncAttrs.addAttribute("approx-func-fp-math", "true");
    if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
        LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
        (LangOpts.getDefaultFPContractMode() ==
         LangOpts.getDefaultFPContractMode() ==
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));
      FuncAttrs.addAttribute("prefer-vector-width",
    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");
    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);

    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");

    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

  if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
      LangOpts.SYCLIsDevice) {
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
    FuncAttrs.addAttribute("save-reg-params");
    StringRef Var, Value;
    FuncAttrs.addAttribute(Var, Value);
                                          const llvm::Function &F,
  auto FFeatures = F.getFnAttribute("target-features");

  llvm::StringSet<> MergedNames;
  MergedFeatures.reserve(TargetOpts.Features.size());

  auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
    for (StringRef Feature : FeatureRange) {
      if (Feature.empty())
      assert(Feature[0] == '+' || Feature[0] == '-');
      StringRef Name = Feature.drop_front(1);
      bool Merged = !MergedNames.insert(Name).second;
        MergedFeatures.push_back(Feature);

  if (FFeatures.isValid())
    AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
  AddUnmergedFeatures(TargetOpts.Features);

  if (!MergedFeatures.empty()) {
    llvm::sort(MergedFeatures);
    FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));

                                                 bool WillInternalize) {
  llvm::AttrBuilder FuncAttrs(F.getContext());
  if (!TargetOpts.CPU.empty())
    FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
  if (!TargetOpts.TuneCPU.empty())
    FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);
                                        CodeGenOpts, LangOpts,
  if (!WillInternalize && F.isInterposable()) {
    F.addFnAttrs(FuncAttrs);

  llvm::AttributeMask AttrsToRemove;

  llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
  llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
  llvm::DenormalMode Merged =
  if (DenormModeToMergeF32.isValid()) {
  if (Merged == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math");
  } else if (Merged != DenormModeToMerge) {
    FuncAttrs.addAttribute("denormal-fp-math",
  if (MergedF32 == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math-f32");
  } else if (MergedF32 != DenormModeToMergeF32) {
    FuncAttrs.addAttribute("denormal-fp-math-f32",
  F.removeFnAttrs(AttrsToRemove);
  F.addFnAttrs(FuncAttrs);
void CodeGenModule::getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
  if (!AttrOnCallSite)
                                                      llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes("", false,
  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);

                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);

  if (LangOpts.NoBuiltin) {
    FuncAttrs.addAttribute("no-builtins");
  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");
  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
                              const llvm::DataLayout &DL, const ABIArgInfo &AI,
                              bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
  if (!DL.typeSizeEqualsStoreSize(Ty))
    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))
  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
                                unsigned NumRequiredArgs, unsigned ArgNo) {
  const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
  if (ArgNo >= NumRequiredArgs)
  if (ArgNo < FD->getNumParams()) {
    const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
    if (Param && Param->hasAttr<MaybeUndefAttr>())
  if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
  if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
        llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
          return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
  llvm::FPClassTest Mask = llvm::fcNone;
  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;
                                                 llvm::AttributeList &Attrs) {
  if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
    Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
    llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
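// The long run of fragments below appears to come from
// CodeGenModule::ConstructAttributeList (assumption: the signature is only
// partially visible). It computes the full llvm::AttributeList for a function
// or call site: function-level attributes from declaration attributes and
// codegen options, return-value attributes, and per-argument attributes keyed
// off each ABIArgInfo.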
                                               llvm::AttributeList &AttrList,
                                               bool AttrOnCallSite, bool IsThunk) {
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    FuncAttrs.addAttribute("cmse_nonsecure_call");

  bool HasOptnone = false;
  const NoBuiltinAttr *NBA = nullptr;

  auto AddPotentialArgAccess = [&]() {
    llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
      FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
                              llvm::MemoryEffects::argMemOnly());

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);
      const bool IsVirtualCall = MD && MD->isVirtual();
      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();
    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);

    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      std::optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
        FuncAttrs.addAttribute("uniform-work-group-size", "true");
        FuncAttrs.addAttribute(
            "uniform-work-group-size",
            llvm::toStringRef(getLangOpts().OffloadUniformBlock));

    if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&
      FuncAttrs.addAttribute("uniform-work-group-size", "true");

    if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
      FuncAttrs.addAttribute("aarch64_pstate_sm_body");
  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");
    if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
          TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
      FuncAttrs.removeAttribute("zero-call-used-regs");
      FuncAttrs.addAttribute(
          "zero-call-used-regs",
          ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));

    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);

    if (TargetDecl->hasAttr<NoConvergentAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::Convergent);

  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      if (!FD->isExternallyVisible())
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",

  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    auto shouldDisableTailCalls = [&] {
      if (CodeGenOpts.DisableTailCalls)
      if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
          TargetDecl->hasAttr<AnyX86InterruptAttr>())
      if (CodeGenOpts.NoEscapingBlockTailCalls) {
        if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
          if (!BD->doesNotEscape())
    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");
    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
  if (CodeGenOpts.EnableNoundefAttrs &&
    RetAttrs.addAttribute(llvm::Attribute::NoUndef);
    RetAttrs.addAttribute(llvm::Attribute::SExt);
    RetAttrs.addAttribute(llvm::Attribute::ZExt);
      RetAttrs.addAttribute(llvm::Attribute::InReg);
      AddPotentialArgAccess();
    llvm_unreachable("Invalid ABI kind for return argument");
      RetAttrs.addDereferenceableAttr(
      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);
      llvm::Align Alignment =
      RetAttrs.addAlignmentAttr(Alignment);

  bool hasUsedSRet = false;

  if (IRFunctionArgs.hasSRetArg()) {
    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    SRETAttrs.addAttribute(llvm::Attribute::Writable);
    SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg()) {
    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =

    auto IRArgs = IRFunctionArgs.getIRArgs(0);
    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
    if (!CodeGenOpts.NullPointerIsValid &&
      Attrs.addAttribute(llvm::Attribute::NonNull);
      Attrs.addDereferenceableOrNullAttr(
    llvm::Align Alignment =
    Attrs.addAlignmentAttr(Alignment);
    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);

       I != E; ++I, ++ArgNo) {
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::AttributeSet::get(
              llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
    if (CodeGenOpts.EnableNoundefAttrs &&
      Attrs.addAttribute(llvm::Attribute::NoUndef);
        Attrs.addAttribute(llvm::Attribute::SExt);
        Attrs.addAttribute(llvm::Attribute::ZExt);
        Attrs.addAttribute(llvm::Attribute::Nest);
        Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
        Attrs.addAttribute(llvm::Attribute::InReg);
        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
        if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
            Decl->getArgPassingRestrictions() ==
          Attrs.addAttribute(llvm::Attribute::NoAlias);
        AddPotentialArgAccess();
      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
      AddPotentialArgAccess();
        Attrs.addDereferenceableAttr(
        if (getTypes().getTargetAddressSpace(PTy) == 0 &&
            !CodeGenOpts.NullPointerIsValid)
          Attrs.addAttribute(llvm::Attribute::NonNull);
        llvm::Align Alignment =
        Attrs.addAlignmentAttr(Alignment);
    if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
      llvm::Align Alignment =
      Attrs.addAlignmentAttr(Alignment);
      Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
        Attrs.addAttribute(llvm::Attribute::NoAlias);
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      Attrs.addAttribute(llvm::Attribute::SwiftAsync);
      Attrs.addAttribute(llvm::Attribute::NoCapture);
    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
  AttrList = llvm::AttributeList::get(
                                          llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
                                QualType ArgType, unsigned ArgNo) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
    if (NNAttr->isNonNull(ArgNo))
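// The fragments below appear to come from EmitFunctionProlog (assumption:
// the signature is elided). It materializes the formal parameters of the
// current function: naming and attributing the IR arguments (nonnull, align,
// dereferenceable, noalias), undoing ABI coercions, and creating the local
// declarations for each Clang parameter.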
  if (FD->hasImplicitReturnZero()) {
    QualType RetTy = FD->getReturnType().getUnqualifiedType();
    llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
  ArgVals.reserve(Args.size());
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      assert(NumIRArgs == 0);
      assert(NumIRArgs == 1);
        ParamAddr = AlignedTemp;
      auto AI = Fn->getArg(FirstIRArg);
      assert(NumIRArgs == 1);
        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
                               PVD->getFunctionScopeIndex()) &&
            AI->addAttr(llvm::Attribute::NonNull);
          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
              uint64_t ArrSize = ArrTy->getZExtSize();
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity() *
                AI->addAttrs(Attrs);
              } else if (getContext().getTargetInfo().getNullPointerValue(
                AI->addAttr(llvm::Attribute::NonNull);
          } else if (const auto *ArrTy =
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
              if (!getTypes().getTargetAddressSpace(ETy) &&
                AI->addAttr(llvm::Attribute::NonNull);

          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
            llvm::ConstantInt *AlignmentCI =
                AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
            if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
              AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
                  llvm::Align(AlignmentInt)));
          AI->addAttr(llvm::Attribute::NoAlias);
      assert(NumIRArgs == 1);
      llvm::Value *V = AI;
              V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
        if (V->getType() != LTy)
      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *Coerced = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
          if (VecTyFrom->getElementType()->isIntegerTy(1) &&
              VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
              VecTyTo->getElementType() == Builder.getInt8Ty()) {
            VecTyFrom = llvm::ScalableVectorType::get(
                VecTyTo->getElementType(),
                VecTyFrom->getElementCount().getKnownMinValue() / 8);
            Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
          if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
            assert(NumIRArgs == 1);
            Coerced->setName(Arg->getName() + ".coerce");
                VecTyTo, Coerced, Zero, "cast.fixed")));

      llvm::StructType *STy =
          STy->getNumElements() > 1) {
        [[maybe_unused]] llvm::TypeSize StructSize =
        [[maybe_unused]] llvm::TypeSize PtrElementSize =
        if (STy->containsHomogeneousScalableVectorTypes()) {
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");
          STy->getNumElements() > 1) {
        llvm::TypeSize PtrElementSize =
        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);
          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
                Builder.CreateInsertValue(LoadedStructValue, AI, i);
          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();
          if (SrcSize <= DstSize) {
          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
        if (SrcSize > DstSize) {
        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");
            llvm::TypeSize::getFixed(
                getContext().getTypeSizeInChars(Ty).getQuantity() -
      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        auto elt = Fn->getArg(argIndex++);
      assert(argIndex == FirstIRArg + NumIRArgs);
      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));
      assert(NumIRArgs == 0);
  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
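// The static helpers below are peephole optimizations on the return value:
// they try to fuse ObjC ARC retain/autorelease pairs at function exit and to
// find the single dominating store to the return-value slot so the epilog
// can forward the stored value directly.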
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
                                    llvm::Value *result) {
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();
  llvm::Instruction *generator = cast<llvm::Instruction>(result);
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));
    if (generator->getNextNode() != bitcast)
    InstsToKill.push_back(bitcast);

  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;
    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==
    doRetainAutorelease = false;
    llvm::Instruction *prev = call->getPrevNode();
    if (isa<llvm::BitCastInst>(prev)) {
      prev = prev->getPrevNode();
    assert(isa<llvm::CallInst>(prev));
    assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
    InstsToKill.push_back(prev);

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);

  for (auto *I : InstsToKill)
    I->eraseFromParent();
  if (doRetainAutorelease)
  return CGF.Builder.CreateBitCast(result, resultType);
                                              llvm::Value *result) {
      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
  if (!retainCall || retainCall->getCalledOperand() !=
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
  llvm::Type *resultType = result->getType();
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  return CGF.Builder.CreateBitCast(load, resultType);
                                           llvm::Value *result) {
  auto GetStoreIfValid = [&CGF,
                          ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
    assert(!SI->isAtomic() &&
  if (!ReturnValuePtr->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
      if (isa<llvm::BitCastInst>(&I))
      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
      return GetStoreIfValid(&I);
  llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
  if (!store) return nullptr;
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
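// setBitRange/setUsedBits and the CMSE clearing helpers below compute, per
// byte, which bits of a returned value are actually used so that ARM CMSE
// non-secure entry functions can mask out the unused bits before returning.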
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;
  while (BitWidth >= CharWidth) {
    BitWidth -= CharWidth;
    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {
  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
    std::reverse(TmpBits.begin(), TmpBits.end());
  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;
                  BFI.Size, CharWidth,
      auto Src = TmpBits.begin();
      auto Dst = Bits.begin() + Offset + I * Size;
      for (int J = 0; J < Size; ++J)
    std::fill_n(Bits.begin() + Offset, Size,
                                   int Pos, int Size, int CharWidth,
    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
      Mask = (Mask << CharWidth) | *P;
    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
      Mask = (Mask << CharWidth) | *--P;
                                                    llvm::IntegerType *ITy,
  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);
  int Size = DataLayout.getTypeStoreSize(ITy);
  return Builder.CreateAnd(Src, Mask, "cmse.clear");
                                                    llvm::ArrayType *ATy,
  int Size = DataLayout.getTypeStoreSize(ATy);
      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
                           DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
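// The fragments below appear to come from EmitFunctionEpilog and the
// return-value sanitizer check (assumption: the signatures are elided). The
// epilog loads or reconstructs the return value according to the return
// ABIArgInfo, reuses a single dominating store to the return slot when
// possible, and emits the ret instruction; the check emits the UBSan
// nonnull-return / nullability-return diagnostic.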
  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
    llvm::Function::arg_iterator EI = CurFn->arg_end();
      llvm::Value *ArgStruct = &*EI;
          cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
      auto AI = CurFn->arg_begin();
      if (llvm::StoreInst *SI =
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();
        if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
          RT = FD->getReturnType();
        else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
          RT = MD->getReturnType();
          llvm_unreachable("Unexpected function/method type");
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        results.push_back(elt);
      if (results.size() == 1) {
        RV = llvm::PoisonValue::get(returnType);
        for (unsigned i = 0, e = results.size(); i != e; ++i) {
          RV = Builder.CreateInsertValue(RV, results[i], i);
    llvm_unreachable("Invalid ABI kind for return argument");

  llvm::Instruction *Ret;
      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
  Ret->setDebugLoc(std::move(RetDbgLoc));

  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;
    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())
        AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;

  SanitizerScope SanScope(this);
  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())
        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
  if (type->isReferenceType()) {
      param->hasAttr<NSConsumedAttr>() &&
      type->isObjCRetainableType()) {
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
      CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
         "cleanup for callee-destructed param not recorded");
  llvm::Instruction *isActive = Builder.CreateUnreachable();
  return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
         "shouldn't have writeback for provably null argument");
  llvm::BasicBlock *contBB = nullptr;
  if (!provablyNonNull) {
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
                                  "icr.writeback-cast");
  if (writeback.ToUse) {
  if (!provablyNonNull)
  for (const auto &I : llvm::reverse(Cleanups)) {
    I.IsActiveIP->eraseFromParent();
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  llvm::PointerType *destType =
  llvm::Type *destElemType =
  CodeGenFunction::ConditionalEvaluation condEval(CGF);
      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;
  llvm::Value *finalArgument;
  if (provablyNonNull) {
    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),
    originBB = CGF.Builder.GetInsertBlock();
    CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
    condEval.begin(CGF);
  llvm::Value *valueToUse = nullptr;
    src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
    phiToUse->addIncoming(valueToUse, copyBB);
    phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
    valueToUse = phiToUse;
  StackBase = CGF.Builder.CreateStackSave("inalloca.save");
  CGF.Builder.CreateStackRestore(StackBase);
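// The next fragment appears to come from EmitNonNullArgCheck (assumption:
// the signature is elided), which emits the UBSan nonnull-attribute /
// nullability-arg check for a call argument before the call is made.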
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
      !PVD->getType()->isRecordType()) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&
                          PVD->getTypeSourceInfo();

  if (!NNAttr && !CanCheckNullability)
    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;
    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;

  SanitizerScope SanScope(this);
  llvm::Constant *StaticData[] = {
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
                                          AbstractCallee AC, unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
  return llvm::any_of(ArgTypes, [&](QualType Ty) {
    return classDecl->getTypeParamListAsWritten();
    return catDecl->getTypeParamList();
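// The fragment below appears to come from EmitCallArgs (assumption: the
// function name is elided). It collects the argument types from the callee
// prototype, evaluates the actual arguments in the ABI-mandated order
// (right-to-left for MS inalloca), and emits implicit pass_object_size
// arguments alongside their parent arguments.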
// CodeGenFunction::EmitCallArgs: collect the argument types, then emit each
// argument in the required evaluation order, adding implicit pass_object_size
// arguments and non-null checks along the way.
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
assert((ParamsToSkip == 0 || Prototype.P) &&
       "Can't skip parameters if type info is not provided");
bool IsVariadic = false;
ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
                MD->param_type_end());
ExplicitCC = FPT->getExtInfo().getCC();
ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
                FPT->param_type_end());
assert(Arg != ArgRange.end() && "Running over edge of argument list!");
(isGenericMethod || Ty->isVariablyModifiedType() ||
 Ty.getNonReferenceType()->isObjCRetainableType() ||
 .getCanonicalType(Ty.getNonReferenceType())
 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
"type mismatch in call argument!");
assert((Arg == ArgRange.end() || IsVariadic) &&
       "Extra arguments in non-variadic function!");
for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
  ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
                                                 EmittedArg.getScalarVal(),
std::swap(Args.back(), *(&Args.back() - 1));
assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
       "inalloca only supported on x86");
size_t CallArgsStart = Args.size();
for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
  unsigned Idx = LeftToRight ? I : E - I - 1;
  unsigned InitialArgSize = Args.size();
  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
          getContext().hasSameUnqualifiedType((*Arg)->getType(),
          (isa<ObjCMethodDecl>(AC.getDecl()) &&
         "Argument and parameter types don't match");
  assert(InitialArgSize + 1 == Args.size() &&
         "The code below depends on only adding one arg per EmitCallArg");
  (void)InitialArgSize;
  if (!Args.back().hasLValue()) {
    RValue RVArg = Args.back().getKnownRValue();
                        ParamsToSkip + Idx);
    MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
std::reverse(Args.begin() + CallArgsStart, Args.end());
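The MaybeEmitImplicitObjectSize lambda above handles pass_object_size parameters. A hedged example of a declaration that makes every caller materialize the hidden __builtin_object_size argument:

#include <cstddef>

// The callee receives an extra, implicit size argument computed at each call site.
void fillBuffer(char *buf __attribute__((pass_object_size(0))), std::size_t n, char v);

void caller() {
  char local[32];
  fillBuffer(local, sizeof(local), 0);  // also passes __builtin_object_size(local, 0)
}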
// Cleanup object constructor (initializer-list fragment).
: Addr(Addr), Ty(Ty) {}

// DisableDebugLocationUpdates: suppress debug locations while emitting a
// CXXDefaultArgExpr.
struct DisableDebugLocationUpdates {
  bool disabledDebugInfo;
  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)

// CodeGenFunction::EmitCallArg: emit a single call argument.
DisableDebugLocationUpdates Dis(*this, E);
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
"reference binding to unmaterialized r-value!");
if (type->isRecordType() &&
bool DestroyedInCallee = true, NeedsCleanup = true;
if (const auto *RD = type->getAsCXXRecordDecl())
  DestroyedInCallee = RD->hasNonTrivialDestructor();
NeedsCleanup = type.isDestructedType();
if (DestroyedInCallee)
if (DestroyedInCallee && NeedsCleanup) {
  llvm::Instruction *IsActive =
if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
    cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
    !type->isArrayParameterType()) {
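The DestroyedInCallee / NeedsCleanup logic above applies to class-type arguments whose destructor runs on the callee side (for example under the Microsoft C++ ABI). A hedged source-level example:

struct Widget {
  ~Widget();  // non-trivial destructor
  int id;
};

void consume(Widget w);

void caller() {
  // When the ABI destroys the parameter in the callee, the caller only keeps a
  // deactivated EH cleanup for the case where the call is never reached.
  consume(Widget{42});
}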
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
if (!getTarget().getTriple().isOSWindows())

CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
// Runtime-call helpers: EmitNounwindRuntimeCall, EmitRuntimeCall,
// EmitNoreturnRuntimeCallOrInvoke, EmitRuntimeCallOrInvoke / EmitCallOrInvoke,
// and deferPlaceholderReplacement.
const llvm::Twine &name) {
const llvm::Twine &name) {
for (auto arg : args)
  values.push_back(arg.emitRawPointer(*this));
const llvm::Twine &name) {
call->setDoesNotThrow();
const llvm::Twine &name) {
if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
  if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
    auto IID = CalleeFn->getIntrinsicID();
    if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
const llvm::Twine &name) {
llvm::CallInst *call = Builder.CreateCall(
llvm::InvokeInst *invoke =
invoke->setDoesNotReturn();
llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
call->setDoesNotReturn();
const Twine &name) {
const Twine &name) {
const Twine &Name) {
llvm::CallBase *Inst;
Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
AddObjCARCExceptionMetadata(Inst);

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
DeferredReplacements.push_back(
    std::make_pair(llvm::WeakTrackingVH(Old), New));
// Helpers that turn assume_aligned / alloc_align attributes on the callee into
// call-site return-alignment attributes or alignment assumptions.
[[nodiscard]] llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
if (CurAlign >= NewAlign)
llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
    .addRetAttribute(Ctx, AlignAttr);

template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
  const AlignedAttrTy *AA = nullptr;
  llvm::Value *Alignment = nullptr;
  llvm::ConstantInt *OffsetCI = nullptr;
  AA = FuncDecl->getAttr<AlignedAttrTy>();
  [[nodiscard]] llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI->getValue().isPowerOf2())
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA->getLocation(), Alignment, OffsetCI);

class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
  : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
  Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
  if (Expr *Offset = AA->getOffset()) {
    if (OffsetCI->isNullValue())

class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
  : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
  Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]

// getMaxVectorWidth: largest vector width used anywhere in the given IR type.
if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
  return VT->getPrimitiveSizeInBits().getKnownMinValue();
if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
unsigned MaxVectorWidth = 0;
if (auto *ST = dyn_cast<llvm::StructType>(Ty))
  for (auto *I : ST->elements())
return MaxVectorWidth;
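These emitters react to the assume_aligned and alloc_align attributes on the callee. A hedged example of declarations they would pick up:

#include <cstddef>

// Return value assumed 64-byte aligned (handled by AssumeAlignedAttrEmitter).
__attribute__((assume_aligned(64))) void *getPool();

// Return alignment given by the first argument (handled by AllocAlignAttrEmitter).
__attribute__((alloc_align(1))) void *alignedAlloc(std::size_t alignment, std::size_t size);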
// CodeGenFunction::EmitCall: emit a call with the given CGFunctionInfo, callee
// and arguments. First set up inalloca argument memory and the sret slot.
llvm::CallBase **callOrInvoke, bool IsMustTail,
bool IsVirtualFunctionPointerThunk) {
const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
    (TargetDecl->hasAttr<TargetAttr>() ||
dyn_cast_or_null<FunctionDecl>(TargetDecl), CallArgs, RetTy);
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
  llvm::AllocaInst *AI;
  IP = IP->getNextNode();
  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), "argmem",
  AI->setAlignment(Align.getAsAlign());
  AI->setUsedWithInAlloca(true);
  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
  ArgMemory = RawAddress(AI, ArgStruct, Align);
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
llvm::Value *UnusedReturnSizePtr = nullptr;
if (IsVirtualFunctionPointerThunk && RetAI.isIndirect()) {
  IRFunctionArgs.getSRetArgNo(),
llvm::TypeSize size =
if (IRFunctionArgs.hasSRetArg()) {
  IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
assert(CallInfo.arg_size() == CallArgs.size() &&
       "Mismatch between function signature & arguments.");
// Lower each Clang-level argument to its IR arguments, following the argument's
// ABIArgInfo kind (InAlloca, Indirect, Ignore, Extend/Direct, CoerceAndExpand,
// Expand).
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
     I != E; ++I, ++info_it, ++ArgNo) {
  if (IRFunctionArgs.hasPaddingArg(ArgNo))
    IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
  unsigned FirstIRArg, NumIRArgs;
  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
  bool ArgHasMaybeUndefAttr =
  assert(NumIRArgs == 0);
  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
  if (I->isAggregate()) {
      ? I->getKnownLValue().getAddress()
      : I->getKnownRValue().getAggregateAddress();
    llvm::Instruction *Placeholder =
    CGBuilderTy::InsertPoint IP = Builder.saveIP();
    Builder.SetInsertPoint(Placeholder);
    deferPlaceholderReplacement(Placeholder, Addr.getPointer());
    I->Ty, getContext().getTypeAlignInChars(I->Ty),
    "indirect-arg-temp");
    I->copyInto(*this, Addr);
    I->copyInto(*this, Addr);
  assert(NumIRArgs == 1);
  if (I->isAggregate()) {
      ? I->getKnownLValue().getAddress()
      : I->getKnownRValue().getAggregateAddress();
    assert((FirstIRArg >= IRFuncTy->getNumParams() ||
            IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                TD->getAllocaAddrSpace()) &&
           "indirect argument must be in alloca address space");
    bool NeedCopy = false;
    } else if (I->hasLValue()) {
      auto LV = I->getKnownLValue();
      if (!isByValOrRef ||
      if ((isByValOrRef &&
      else if ((isByValOrRef &&
                Addr.getType()->getAddressSpace() != IRFuncTy->
      auto *T = llvm::PointerType::get(
      if (ArgHasMaybeUndefAttr)
        Val = Builder.CreateFreeze(Val);
      IRCallArgs[FirstIRArg] = Val;
      if (ArgHasMaybeUndefAttr)
        Val = Builder.CreateFreeze(Val);
      IRCallArgs[FirstIRArg] = Val;
      llvm::TypeSize ByvalTempElementSize =
      llvm::Value *LifetimeSize =
      CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
      I->copyInto(*this, AI);
  assert(NumIRArgs == 0);
  assert(NumIRArgs == 1);
  if (!I->isAggregate())
    V = I->getKnownRValue().getScalarVal();
    I->hasLValue() ? I->getKnownLValue().getAddress()
                   : I->getKnownRValue().getAggregateAddress());
  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
  V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
  cast<llvm::AllocaInst>(V)->setSwiftError(true);
  V->getType()->isIntegerTy())
  if (FirstIRArg < IRFuncTy->getNumParams() &&
      V->getType() != IRFuncTy->getParamType(FirstIRArg))
    V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
  if (ArgHasMaybeUndefAttr)
  IRCallArgs[FirstIRArg] = V;
  llvm::StructType *STy =
  [[maybe_unused]] llvm::TypeSize SrcTypeSize =
  [[maybe_unused]] llvm::TypeSize DstTypeSize =
  if (STy->containsHomogeneousScalableVectorTypes()) {
    assert(SrcTypeSize == DstTypeSize &&
           "Only allow non-fractional movement of structure with "
           "homogeneous scalable vector type");
    IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
  if (!I->isAggregate()) {
    I->copyInto(*this, Src);
    Src = I->hasLValue() ? I->getKnownLValue().getAddress()
                         : I->getKnownRValue().getAggregateAddress();
  llvm::TypeSize SrcTypeSize =
  if (SrcTypeSize.isScalable()) {
    assert(STy->containsHomogeneousScalableVectorTypes() &&
           "ABI only supports structure with homogeneous scalable vector "
    assert(SrcTypeSize == DstTypeSize &&
           "Only allow non-fractional movement of structure with "
           "homogeneous scalable vector type");
    assert(NumIRArgs == STy->getNumElements());
    llvm::Value *StoredStructValue =
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *Extract = Builder.CreateExtractValue(
          StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
      IRCallArgs[FirstIRArg + i] = Extract;
  uint64_t SrcSize = SrcTypeSize.getFixedValue();
  uint64_t DstSize = DstTypeSize.getFixedValue();
  if (SrcSize < DstSize) {
  assert(NumIRArgs == STy->getNumElements());
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    if (ArgHasMaybeUndefAttr)
      LI = Builder.CreateFreeze(LI);
    IRCallArgs[FirstIRArg + i] = LI;
  assert(NumIRArgs == 1);
  auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
  if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
  if (ArgHasMaybeUndefAttr)
  IRCallArgs[FirstIRArg] = Load;
  llvm::Value *tempSize = nullptr;
  if (I->isAggregate()) {
    addr = I->hasLValue() ? I->getKnownLValue().getAddress()
                          : I->getKnownRValue().getAggregateAddress();
    RValue RV = I->getKnownRValue();
    nullptr, &AllocaAddr);
  unsigned IRArgPos = FirstIRArg;
  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
    llvm::Type *eltType = coercionType->getElementType(i);
    if (ArgHasMaybeUndefAttr)
      elt = Builder.CreateFreeze(elt);
    IRCallArgs[IRArgPos++] = elt;
  assert(IRArgPos == FirstIRArg + NumIRArgs);
  unsigned IRArgPos = FirstIRArg;
  ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
  assert(IRArgPos == FirstIRArg + NumIRArgs);
// Prepare the concrete callee, strip pointless variadic bitcasts, then build
// the call-site attribute list and calling convention.
const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
assert(IRFunctionArgs.hasInallocaArg());
IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                 llvm::Value *Ptr) -> llvm::Function * {
  if (!CalleeFT->isVarArg())
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == llvm::Instruction::BitCast)
      Ptr = CE->getOperand(0);
  llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
  if (OrigFT->isVarArg() ||
      OrigFT->getNumParams() != CalleeFT->getNumParams() ||
      OrigFT->getReturnType() != CalleeFT->getReturnType())
  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
    if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
  IRFuncTy = OrigFn->getFunctionType();
assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
  if (IRFunctionArgs.hasInallocaArg() &&
      i == IRFunctionArgs.getInallocaArgNo())
  if (i < IRFuncTy->getNumParams())
    assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
for (unsigned i = 0; i < IRCallArgs.size(); ++i)
  LargestVectorWidth = std::max(LargestVectorWidth,
llvm::AttributeList Attrs;
if (CallingConv == llvm::CallingConv::X86_VectorCall &&
    getTarget().getTriple().isWindowsArm64EC()) {
  CGM.Error(Loc, "__vectorcall calling convention is not currently "
if (FD->hasAttr<StrictFPAttr>())
  Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Convergent);
!(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
CannotThrow = false;
CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
  if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
if (UnusedReturnSizePtr)
  UnusedReturnSizePtr);
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
!isa_and_nonnull<FunctionDecl>(TargetDecl))
if (FD->hasAttr<StrictFPAttr>())
  Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
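One concrete path through the attribute code above is the __vectorcall diagnostic: the convention lowers to llvm::CallingConv::X86_VectorCall on x86 targets but is rejected when targeting Arm64EC. A hedged example (requires -fms-extensions or a Windows target):

// Accepted for x86/x64 Windows; triggers the error above on Arm64EC.
int __vectorcall dotProduct(float a, float b, float c, float d);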
// Emit the call or invoke, then apply call-site attributes, calling convention,
// tail-call kind, and metadata (heapallocsite, srcloc for the error attribute),
// and handle noreturn callees.
CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
    CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
  if (const auto *A = FD->getAttr<CFGuardAttr>()) {
    if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
CI->setAttributes(Attrs);
CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
if (!CI->getType()->isVoidTy())
  CI->setName("call");
LargestVectorWidth =
if (!CI->getCalledFunction())
AddObjCARCExceptionMetadata(CI);
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
    Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  else if (IsMustTail) {
    else if (Call->isIndirectCall())
    else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
      if (!cast<FunctionDecl>(TargetDecl)->isDefined())
        {cast<FunctionDecl>(TargetDecl), Loc});
      if (llvm::GlobalValue::isWeakForLinker(Linkage) ||
          llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
    TargetDecl->hasAttr<MSAllocatorAttr>())
if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
  llvm::ConstantInt *Line =
  llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
  CI->setMetadata("srcloc", MDT);
if (CI->doesNotReturn()) {
  if (UnusedReturnSizePtr)
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    if (auto *F = CI->getCalledFunction())
      F->removeFnAttr(llvm::Attribute::NoReturn);
    CI->removeFnAttr(llvm::Attribute::NoReturn);
    SanitizerKind::KernelAddress)) {
    SanitizerScope SanScope(this);
    llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
    auto *FnType = llvm::FunctionType::get(CGM.VoidTy, false);
    llvm::FunctionCallee Fn =
  Builder.ClearInsertionPoint();
if (CI->getType()->isVoidTy())
Builder.ClearInsertionPoint();
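The IsMustTail path above marks the generated call llvm::CallInst::TCK_MustTail. A hedged example of the source construct that requests it:

int step(int n);

int driver(int n) {
  // Requires a tail call with a matching signature; codegen then emits the
  // call instruction as musttail, as in the fragment above.
  [[clang::musttail]] return step(n - 1);
}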
// Finally, convert the IR return value back into an RValue according to the
// return ABIArgInfo, and run the deferred lifetime ends.
if (swiftErrorTemp.isValid()) {
if (IsVirtualFunctionPointerThunk) {
bool requiresExtract = isa<llvm::StructType>(CI->getType());
unsigned unpaddedIndex = 0;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
  llvm::Type *eltType = coercionType->getElementType(i);
  llvm::Value *elt = CI;
  if (requiresExtract)
    elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
  assert(unpaddedIndex == 0);
if (UnusedReturnSizePtr)
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
llvm::Value *V = CI;
if (V->getType() != RetIRTy)
if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
  llvm::Value *V = CI;
  if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
    if (FixedDstTy->getElementType() ==
        ScalableSrcTy->getElementType()) {
      V = Builder.CreateExtractVector(FixedDstTy, V, Zero,
DestIsVolatile = false;
llvm_unreachable("Invalid ABI kind for return argument");
llvm_unreachable("Unhandled ABIArgInfo::Kind");
if (Ret.isScalar() && TargetDecl) {
  AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
  LifetimeEnd.Emit(*this, {});
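Another call-site detail visible above is the ErrorAttr handling, which attaches "srcloc" metadata so that the backend diagnostic for the error attribute can point back at the call site. A hedged example:

__attribute__((error("never call directly"))) void forbidden();

void user() {
  forbidden();  // if this call survives to codegen, the backend reports it via the srcloc metadata
}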
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from TargetOpts and F, and sets the result in FuncAttr.
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from Src, coerced to a value of type Ty.
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
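A hedged note on how this mask is typically consumed: with -ffinite-math-only (or -fno-honor-nans / -fno-honor-infinities) recent Clang releases annotate floating-point parameters and return values with nofpclass attributes derived from this mask. For example:

// Compiled with: clang -O1 -ffinite-math-only
// Assumption: the float parameter and return value carry nofpclass(nan inf)
// attributes in the emitted IR.
float scale(float x) { return 2.0f * x; }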
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
enum clang::sema::@1655::IndirectLocalPathEntry::EntryKind Kind
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const LLVM_READONLY
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of type Ty from the __builtin_ms_va_list pointed to by VAListAddr.
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of type Ty from the va_list pointed to by VAListAddr.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StringRef getName() const
Return the IR name of the pointer value.
llvm::PointerType * getType() const
Return the type of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to Dst from Src, coercing the stored value as needed.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
const TargetInfo & getTarget() const
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
CGDebugInfo * getDebugInfo()
Address EmitVAListRef(const Expr *E)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertType(QualType T)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
CodeGenTypes & getTypes() const
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CallExpr * MustTailCall
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
CallType * addControlledConvergenceToken(CallType *Input)
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
const ABIInfo & getABIInfo()
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
void addUndefinedGlobalForTailCall(std::pair< const FunctionDecl *, SourceLocation > Global)
ObjCEntrypoints & getObjCEntrypoints() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD)
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given CGFunctionInfo.
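A minimal, hedged sketch of the usual two-step lowering, assuming a CodeGenTypes &CGT and a const FunctionDecl *FD are in scope:
  const CGFunctionInfo &FI = CGT.arrangeFunctionDeclaration(FD); // ABI arrangement of the signature
  llvm::FunctionType *FTy = CGT.GetFunctionType(FI);             // lowered LLVM IR function type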
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type Ty into the LLVM argument types it would be passed as.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
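A hedged call-site sketch, assuming a CodeGenTypes &CGT, a populated CallArgList Args, and the callee's formal FunctionType *FnType:
  const CGFunctionInfo &FI =
      CGT.arrangeFreeFunctionCall(Args, FnType, /*ChainCall=*/false);
  llvm::FunctionType *IRFuncTy = CGT.GetFunctionType(FI); // type used to emit the call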
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
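A hedged usage sketch, assuming a CodeGenTypes &CGT, a FunctionProtoType *FPT, and the llvm::Function *Fn being emitted:
  unsigned CC = CGT.ClangCallConvToLLVMCallConv(FPT->getExtInfo().getCC());
  Fn->setCallingConv(static_cast<llvm::CallingConv::ID>(CC)); // stamp the IR-level convention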
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
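A hedged sketch of declaring a 'void ()' runtime helper through this arrangement (CGM is an assumed CodeGenModule; the helper name is purely illustrative):
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::FunctionCallee Helper =
      CGM.CreateRuntimeFunction(FTy, "__illustrative_nullary_helper");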
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
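A hedged sketch of the save/restore idiom these iterators support, assuming an EHScopeStack named EHStack as in CodeGenFunction:
  EHScopeStack::stable_iterator Depth = EHStack.stable_begin(); // survives later pushes/pops
  // ... cleanups are pushed and popped here ...
  EHScopeStack::iterator Scope = EHStack.find(Depth);           // back to a live iterator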
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
LangAS getAddressSpace() const
CharUnits getAlignment() const
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
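A hedged sketch of the three RValue flavors listed above, assuming llvm::Value *V, *Re, *Im and an aggregate Address Tmp are already available:
  RValue Scalar    = RValue::get(V);               // a single SSA value
  RValue Complex   = RValue::getComplex(Re, Im);   // real/imaginary pair
  RValue Aggregate = RValue::getAggregate(Tmp);    // result lives in memory
  llvm::Value *Loaded = Scalar.getScalarVal();     // unwrap the scalar case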
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
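A hedged sketch, assuming a const FunctionProtoType *FPT for a call that passes one extra prefix argument (for example an implicit 'this'):
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*additional=*/1);
  // allowsOptionalArgs() is true only when the prototype is variadic; the fixed
  // portion of the argument list is given by getNumRequiredArgs().
  unsigned Fixed = Required.getNumRequiredArgs();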
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isZeroLengthBitField(const ASTContext &Ctx) const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isVariadic() const
Whether this function prototype is variadic.
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
const Decl * getDecl() const
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of the identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
bool isParamDestroyedInCallee() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
bool isBlockPointerType() const
bool isIncompleteArrayType() const
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
QualType getCanonicalTypeInternal() const
bool isMemberPointerType() const
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>.
bool isNullPtrType() const
bool isObjCRetainableType() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
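A hedged sketch, assuming an ASTContext &Ctx and a QualType ArgTy inside ABI-classification code in the clang::CodeGen namespace:
  if (isEmptyRecord(Ctx, ArgTy, /*AllowArrays=*/true)) {
    // an ABI may legitimately choose to ignore such an argument entirely
  }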
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
bool This(InterpState &S, CodePtr OpPC)
bool Zero(InterpState &S, CodePtr OpPC)
bool Load(InterpState &S, CodePtr OpPC)
The JSON file list parser is used to communicate input to InstallAPI.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
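To make the field meanings concrete, here is a standalone (non-clang) illustration of how a value is recovered from its storage unit using Offset and Size; StorageSize would govern the width of the load itself:
  #include <cstdint>

  // Extract an unsigned bit-field of 'Size' bits that starts 'Offset' bits into
  // its already-loaded storage unit.
  uint64_t extractBitField(uint64_t Storage, unsigned Offset, unsigned Size) {
    uint64_t Mask = (Size >= 64) ? ~0ull : ((1ull << Size) - 1);
    return (Storage >> Offset) & Mask;
  }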
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
LangAS getASTAllocaAddressSpace() const
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.