#include "TargetInfo.h"
// ...
#include "llvm/TargetParser/AArch64TargetParser.h"
class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

  bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadicFn) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
      unsigned &NPRN) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;

  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          !FI.isVariadic() ||
          ArgNo < FI.getRequiredArgs().getNumRequiredArgs();
      ++ArgNo;
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }

  RValue EmitDarwinVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                         AggValueSlot Slot) const;

  RValue EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                        AArch64ABIKind Kind, AggValueSlot Slot) const;

  RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                     AggValueSlot Slot) const override;

  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }
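  // Dispatch on ABI flavor: the MSVC-style va_list on Win64, Darwin's scheme
  // under darwinpcs, and the AAPCS64 register-save-area walk otherwise (see
  // EmitAAPCSVAArg below).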
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                       Attr.CPU, BPI, Error);
        assert(Error.empty());
      }
    }
    auto *Fn = cast<llvm::Function>(GV);
    setBranchProtectionFnAttributes(BPI, *Fn);
  }
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      // With ls64, the 512-bit data512_t (a struct wrapping [8 x i64]) may
      // be used directly as an inline-asm operand.
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }

  void checkFunctionABI(CodeGenModule &CGM,
                        const FunctionDecl *Decl) const override;

  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;

  bool wouldInliningViolateFunctionCallABI(
      const FunctionDecl *Caller, const FunctionDecl *Callee) const override;

private:
  void checkFunctionCallABIStreaming(CodeGenModule &CGM,
                                     SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee) const;
  void checkFunctionCallABISoftFloat(CodeGenModule &CGM,
                                     SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee,
                                     const CallArgList &Args,
                                     QualType ReturnType) const;
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");

    case BuiltinType::SChar:
    case BuiltinType::UChar:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);

    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);

    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);

    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);

    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);

    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);

    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);

    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
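// Net effect (sketch): a GNU vector like <2 x i8> (16 bits) is coerced to i16
// on Android/OHOS and to i32 elsewhere; a 64-bit vector becomes <2 x i32> and
// a 128-bit one <4 x i32>, each consuming one SIMD/FP register (NSRN); any
// other size is passed indirectly.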
ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  // A pure scalable type is passed as a copy of registers only if enough of
  // them remain available.
  NSRN += NVec;
  NPRN += NPred;

  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  /*isPacked=*/true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType = llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq,
                                             /*isPacked=*/false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
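// Illustrative case (sketch): for a Pure Scalable Type such as
//   struct PST { svfloat32_t v0, v1; svbool_t p; };
// passAsPureScalableType reports NVec = 2 and NPred = 1, so a named argument
// is expanded member-by-member into Z and P registers, provided the running
// counts stay within the 8 vector and 4 predicate registers available.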
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                                bool IsNamedArg,
                                                unsigned CallingConvention,
                                                unsigned &NSRN,
                                                unsigned &NPRN) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::MFloat8x8:
        case BuiltinType::MFloat8x16:
          NSRN = std::min(NSRN + 1, 8u);
          break;
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty))
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial copy
  // constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Empty records:
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (!Ty->isSVESizelessBuiltinType() && (IsEmpty || Size == 0)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();
    // ...
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  // In variadic functions on Windows, all composite types are treated alike,
  // with no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise set it to 8
    // according to the AAPCS64.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, /*CanBeFlattened=*/true, Align);
  }

  // In AAPCS, named arguments of Pure Scalable Type are passed expanded in
  // registers, or indirectly if there are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment;
    // for aggregates with 16-byte alignment we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
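// Worked example (sketch): struct S { int a, b, c; } is 12 bytes with 4-byte
// alignment, so under AAPCS it is rounded up to two 64-bit chunks and passed
// directly as [2 x i64]; the same struct with alignas(16) would instead
// travel as a single i128.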
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  // ...

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS, return values of Pure Scalable Type are treated as a single
  // named argument and passed expanded in registers, or indirectly if there
  // are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, /*IsNamedArg=*/true, NVec, NPred, UnpaddedCoerceToSeq, NSRN,
          NPRN);
  }

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in the lower bits of a 64-bit register
      // for little-endian, taking advantage of implicit zero-extension.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment;
    // for 16-byte alignment we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
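// For instance (sketch): struct { float x, y, z; } is an HFA returned
// directly in s0-s2; struct { int a, b; } (8 bytes) comes back as i64 on
// little-endian targets; and struct { int a, b, c; } (12 bytes) is rounded up
// and returned as [2 x i64].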
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Fixed-length SVE vectors are "illegal" only in the sense that they must
    // be coerced to scalable vector types for passing and returning.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows
    // huge vectors.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // For the soft-float ABI variant, no types are considered to be homogeneous
  // aggregates.
  if (isSoftFloat())
    return false;

  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type; any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() || BT->getKind() == BuiltinType::MFloat8x16 ||
        BT->getKind() == BuiltinType::MFloat8x8)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 applies the homogeneous-aggregate rules to the layout produced by
  // the data layout decision, so zero-length bit-fields do not disqualify a
  // type.
  return true;
}

bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return !BT->isSVECount() &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}
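// Example (sketch): under these rules struct { double d[3]; } and a struct of
// four float32x4_t members both qualify as homogeneous aggregates (at most
// four members sharing a floating-point or 64/128-bit short-vector base
// type), while a five-member variant does not.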
// Check if a type is a Pure Scalable Type as defined by AAPCS64. Return the
// number of data vectors and the number of predicate vectors in the type, in
// NVec and NPred respectively. Upon return, CoerceToSeq contains an expanded
// sequence of LLVM IR types, one element for each non-composite member. For
// practical purposes, the length of CoerceToSeq is limited to about 12, the
// maximum that could possibly fit in registers.
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    if (NElt == 0)
      return false;

    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltCoerceToSeq, std::back_inserter(CoerceToSeq));

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // If the record cannot be passed in registers, it is not a PST.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
        RAA != CGCXXABI::RAA_Default)
      return false;

    // Pure scalable types are never unions and never contain unions.
    const RecordDecl *RD = RT->getDecl();
    if (RD->isUnion())
      return false;

    // If this is a C++ record, check the bases.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    // Check members.
    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (isEmptyField(getContext(), FD, /*AllowArrays=*/true))
        continue;
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }

    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    return false;
  }

  if (!passAsAggregateType(Ty))
    return false;

  // Check if this is a built-in SVE type.
  if (!Ty->isBuiltinType())
    return false;

  bool isPredicate;
  switch (Ty->castAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    isPredicate = false;                                                       \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    isPredicate = true;                                                        \
    break;
#define SVE_TYPE(Name, Id, SingletonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    NPred += Info.NumVectors;
  else
    NVec += Info.NumVectors;
  auto VTy = llvm::ScalableVectorType::get(CGT.ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}
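// Example (sketch): struct { svfloat32_t v; svbool_t p; } walks this function
// as a record with two SVE builtin fields, yielding NVec = 1, NPred = 1 and
// CoerceToSeq = { <vscale x 4 x float>, <vscale x 16 x i1> }.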
// Expand an LLVM IR type into a sequence of non-composite types, keeping any
// padding types produced for coerce-and-expand intact.
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltFlattened, std::back_inserter(Flattened));
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
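// Example (sketch): flattening { [2 x i64], float } yields the sequence
// { i64, i64, float }; padding types inserted by coerce-and-expand are kept
// as-is so member offsets remain correct.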
RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF,
                                      AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // These counters are not used for variadic arguments, so it does not matter
  // that they do not retain their values across calls.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /*IsNamedArg=*/false,
                           CGF.CurFnInfo->getCallingConvention(), NSRN, NPRN);
  // ...
  bool IsIndirect = AI.isIndirect();
  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  // The AAPCS va_list holds separate save areas for general and SIMD&FP
  // registers, plus the running stack pointer:
  //   struct { void *__stack; void *__gr_top; void *__vr_top;
  //            int __gr_offs; int __vr_offs; };
  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // __gr_offs: general registers are 8 bytes each.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // __vr_offs: SIMD&FP registers occupy 16 bytes each in the save area.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // __vr_top
    RegSize = 16 * NumRegs;
  }

  // If reg_offs >= 0 we're already using the stack for this type of argument.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in an even/odd register pair), so
  // align __gr_offs before computing the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the offset unconditionally: spilling an argument to the stack also
  // uses up all remaining registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================
  CGF.EmitBlock(InRegBlock);
  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect)
    MemTy = llvm::PointerType::getUnqual(MemTy);

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers have their elements stored
    // 16 bytes apart (notionally in q0, q1, ...); reload them into a
    // contiguous temporary.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp =
        CGF.CreateTempAlloca(HFATy, std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value is right-aligned in its 16-byte slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);
      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }
    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory. It might be
    // right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) && TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }
  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);
  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Stack arguments may need extra alignment beyond the 8-byte slot.
  if (!IsIndirect && TyAlign.getQuantity() > 8)
    OnStackPtr = emitRoundPointerUpToAlignment(CGF, OnStackPtr, TyAlign);

  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(
            Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                    TyAlign),
            Ty),
        Slot);

  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // ...
  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
  // ...
  // The size of the actual thing passed, which might end up just being a
  // pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates are
  // passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}

RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*AllowHigherAlign=*/false, Slot);
}

static bool isStreamingCompatible(const FunctionDecl *F) {
  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
    return T->getAArch64SMEAttributes() &
           FunctionType::SME_PStateSMCompatibleMask;
  return false;
}

// Report an error if an argument or return value of type Ty would need to be
// passed in a floating-point register.
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers))
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
}
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
    // On a target without FP registers, but using a hard-float ABI, diagnose
    // any argument or return value that would need one.
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                         FuncDecl->getReturnType(), FuncDecl,
                         FuncDecl->getLocation());
    for (ParmVarDecl *PVD : FuncDecl->parameters())
      diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
                           PVD->getType(), PVD, FuncDecl->getLocation());
  }
}

enum class ArmSMEInlinability : uint8_t {
  Ok = 0,
  ErrorCalleeRequiresNewZA = 1 << 0,
  WarnIncompatibleStreamingModes = 1 << 1,
  ErrorIncompatibleStreamingModes = 1 << 2,

  IncompatibleStreamingModes =
      WarnIncompatibleStreamingModes | ErrorIncompatibleStreamingModes,

  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ErrorIncompatibleStreamingModes),
};

/// Determines if there are any Arm SME ABI issues with inlining \p Callee into
/// \p Caller.
static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }
  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>())
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;

  return Inlinability;
}

void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();
}

// If the target does not have floating-point registers, but we are using a
// hard-float ABI, there is no way to pass floating-point, vector or HFA
// values to functions, so report an error.
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
    return;

  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}

void AArch64TargetCodeGenInfo::checkFunctionCallABI(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}
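// Example (sketch): a non-streaming caller invoking an always_inline
// streaming callee is diagnosed with
// err_function_always_inline_attribute_mismatch, since inlining would change
// the streaming mode of the inlined code; a callee marked __arm_new("za")
// can never be always-inlined.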
bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}

void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
      if (UniqueFeats.insert(Ext->Name).second)
        Out << 'M' << Ext->Name;
}
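// Example (sketch): appendAttributeMangling("sve2+dotprod", Out) splits the
// string on '+', trims and sorts the features, deduplicates them via
// parseFMVExtension, and appends each surviving extension as 'M' plus its
// canonical name, producing the suffix "._MdotprodMsve2".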
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}