clang 19.0.0git
AArch64.cpp
Go to the documentation of this file.
1//===- AArch64.cpp --------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ABIInfoImpl.h"
10#include "TargetInfo.h"
11#include "clang/AST/Decl.h"
13#include "llvm/TargetParser/AArch64TargetParser.h"
14
15using namespace clang;
16using namespace clang::CodeGen;
17
18//===----------------------------------------------------------------------===//
19// AArch64 ABI Implementation
20//===----------------------------------------------------------------------===//
21
22namespace {
23
// AArch64 ABI lowering: classifies arguments and return values per AAPCS64
// (or the Darwin / Win64 / soft-float variants selected by AArch64ABIKind)
// and emits the va_arg sequences for each variant.
class AArch64ABIInfo : public ABIInfo {
  // NOTE(review): extraction dropped a line here — presumably the
  // `AArch64ABIKind Kind;` data member that the ctor, getABIKind() and
  // isSoftFloat() all reference. Verify against upstream.

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

  // True only for the AAPCS soft-float variant (no FP registers used for
  // argument passing).
  bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }

private:
  AArch64ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
                                  unsigned CallingConvention) const;
  // Maps "illegal" vectors (fixed-length SVE, odd-sized NEON) to legal IR
  // types for passing/returning; see the definition below.
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  // NOTE(review): the first line of this declaration was lost in extraction
  // (presumably `bool isHomogeneousAggregateSmallEnough(const Type *Base,`,
  // matching the definition later in this file).
                                         uint64_t Members) const override;
  // NOTE(review): a declaration line was lost here (presumably
  // `bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const
  // override;`, which is defined later in this file).

  bool isIllegalVectorType(QualType Ty) const;

  // Classifies the return type and every argument of FI in place.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          // NOTE(review): the RHS line was lost in extraction (presumably
          // `classifyReturnType(FI.getReturnType(), FI.isVariadic());`).

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type, FI.isVariadic(),
      // NOTE(review): the final argument line was lost in extraction
      // (presumably `FI.getCallingConvention());`).
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                         AArch64ABIKind Kind) const;

  // NOTE(review): the first line of this override's signature was lost in
  // extraction (presumably `Address EmitVAArg(CodeGenFunction &CGF, Address
  // VAListAddr,`).
                    QualType Ty) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    // Scalable (SVE) types have no fixed size, so they cannot occupy a
    // va_list slot; reject them outright.
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    // Dispatch to the variant-specific va_arg expansion.
    return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
    : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
    : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind);
  }

  // NOTE(review): the first line of this declaration was lost in extraction
  // (presumably `Address EmitMSVAArg(CodeGenFunction &CGF, Address
  // VAListAddr,`, matching the definition later in this file).
                  QualType Ty) const override;

  // bfloat16 in args/returns is allowed only when the target has the type.
  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }

  // NOTE(review): a line was lost here (presumably a using-declaration
  // pulling in the base-class appendAttributeMangling overloads).
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};
89
// Swift ABI queries for AArch64. On this target Swift's error value travels
// in a register, hence /*SwiftErrorInRegister=*/true.
class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  // Whether a vector of NumElts elements of EltTy, VectorSize bytes in
  // total, is legal for Swift's calling convention; defined below.
  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
98
// Target hooks for AArch64 code generation: ABI info, ObjC ARC marker,
// DWARF SP number, branch-protection attributes, inline-asm operand
// scalarization (ls64), and SME/soft-float call-site diagnostics.
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  // Marker the ObjC runtime recognizes after calls returning an autoreleased
  // value.
  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  // DWARF register number used for the EH stack pointer (SP == 31).
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  // Translates a function's branch-protection target attribute into the
  // corresponding LLVM function attributes.
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *TA = FD->getAttr<TargetAttr>();
    if (TA == nullptr)
      return;

    // NOTE(review): the declaration line of the parse result was lost in
    // extraction (presumably `ParsedTargetAttr Attr =`). Verify upstream.
        CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
    if (Attr.BranchProtection.empty())
      return;

    // NOTE(review): a declaration line was lost here (presumably
    // `TargetInfo::BranchProtectionInfo BPI;`, filled in by the call below).
    StringRef Error;
    (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                   Attr.CPU, BPI, Error);
    // Invalid branch-protection strings should have been rejected earlier.
    assert(Error.empty());

    auto *Fn = cast<llvm::Function>(GV);
    Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());

    if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address-key",
                    BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    Fn->addFnAttr("branch-target-enforcement",
                  BPI.BranchTargetEnforcement ? "true" : "false");
    Fn->addFnAttr("branch-protection-pauth-lr",
                  BPI.BranchProtectionPAuthLR ? "true" : "false");
    Fn->addFnAttr("guarded-control-stack",
                  BPI.GuardedControlStack ? "true" : "false");
  }

  // With +ls64, a struct wrapping exactly `i64 x[8]` can be used directly as
  // an inline-asm operand (512-bit load/store instructions).
  // NOTE(review): the first line of this override's signature was lost in
  // extraction (presumably
  // `bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,`).
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    // NOTE(review): the fallthrough line was lost in extraction (presumably
    // `return false;`).
  }

  // NOTE(review): the first line of this declaration was lost in extraction
  // (presumably `void checkFunctionABI(CodeGenModule &CGM,`).
                        const FunctionDecl *Decl) const override;

  // NOTE(review): the first line(s) of this declaration were lost in
  // extraction (presumably
  // `void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,`).
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;

private:
  // Diagnose calls between functions with incompatible Streaming SVE
  // attributes.
  void checkFunctionCallABIStreaming(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee) const;
  // Diagnose calls which must pass arguments in floating-point registers when
  // the selected target does not have floating-point registers.
  void checkFunctionCallABISoftFloat(CodeGenModule &CGM, SourceLocation CallLoc,
                                     const FunctionDecl *Caller,
                                     const FunctionDecl *Callee,
                                     const CallArgList &Args,
                                     QualType ReturnType) const;
};
191
// Windows-on-ARM64 variant: adds MSVC-style linker directives and
// stack-probe attributes on top of the common AArch64 handling.
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  // Emits a /DEFAULTLIB linker directive (e.g. for #pragma comment(lib,...)).
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  // Emits a /FAILIFMISMATCH linker directive (for #pragma detect_mismatch).
  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
210
211void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
212 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
213 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
214 if (GV->isDeclaration())
215 return;
216 addStackProbeTargetAttributes(D, GV, CGM);
217}
218}
219
220ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
221 assert(Ty->isVectorType() && "expected vector type!");
222
223 const auto *VT = Ty->castAs<VectorType>();
224 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
225 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
226 assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
227 BuiltinType::UChar &&
228 "unexpected builtin type for SVE predicate!");
229 return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
230 llvm::Type::getInt1Ty(getVMContext()), 16));
231 }
232
233 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
234 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
235
236 const auto *BT = VT->getElementType()->castAs<BuiltinType>();
237 llvm::ScalableVectorType *ResType = nullptr;
238 switch (BT->getKind()) {
239 default:
240 llvm_unreachable("unexpected builtin type for SVE vector!");
241 case BuiltinType::SChar:
242 case BuiltinType::UChar:
243 ResType = llvm::ScalableVectorType::get(
244 llvm::Type::getInt8Ty(getVMContext()), 16);
245 break;
246 case BuiltinType::Short:
247 case BuiltinType::UShort:
248 ResType = llvm::ScalableVectorType::get(
249 llvm::Type::getInt16Ty(getVMContext()), 8);
250 break;
251 case BuiltinType::Int:
252 case BuiltinType::UInt:
253 ResType = llvm::ScalableVectorType::get(
254 llvm::Type::getInt32Ty(getVMContext()), 4);
255 break;
256 case BuiltinType::Long:
257 case BuiltinType::ULong:
258 ResType = llvm::ScalableVectorType::get(
259 llvm::Type::getInt64Ty(getVMContext()), 2);
260 break;
261 case BuiltinType::Half:
262 ResType = llvm::ScalableVectorType::get(
263 llvm::Type::getHalfTy(getVMContext()), 8);
264 break;
265 case BuiltinType::Float:
266 ResType = llvm::ScalableVectorType::get(
267 llvm::Type::getFloatTy(getVMContext()), 4);
268 break;
269 case BuiltinType::Double:
270 ResType = llvm::ScalableVectorType::get(
271 llvm::Type::getDoubleTy(getVMContext()), 2);
272 break;
273 case BuiltinType::BFloat16:
274 ResType = llvm::ScalableVectorType::get(
275 llvm::Type::getBFloatTy(getVMContext()), 8);
276 break;
277 }
278 return ABIArgInfo::getDirect(ResType);
279 }
280
281 uint64_t Size = getContext().getTypeSize(Ty);
282 // Android promotes <2 x i8> to i16, not i32
283 if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
284 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
285 return ABIArgInfo::getDirect(ResType);
286 }
287 if (Size <= 32) {
288 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
289 return ABIArgInfo::getDirect(ResType);
290 }
291 if (Size == 64) {
292 auto *ResType =
293 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
294 return ABIArgInfo::getDirect(ResType);
295 }
296 if (Size == 128) {
297 auto *ResType =
298 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
299 return ABIArgInfo::getDirect(ResType);
300 }
301 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
302}
303
// Classify a single argument per the selected AArch64 ABI variant.
// NOTE(review): the return-type line of this definition was lost in
// extraction (presumably a lone `ABIArgInfo`). Verify against upstream.
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
                                     unsigned CallingConvention) const {
  // NOTE(review): a line was lost here (possibly a transparent-union
  // adjustment of Ty) — verify against upstream.

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // _BitInt wider than 128 bits cannot live in registers.
    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, false);

    // Darwin extends small integer arguments; plain AAPCS does not.
    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
    // NOTE(review): the result lines of this conditional were lost in
    // extraction (presumably `? ABIArgInfo::getExtend(Ty)
    // : ABIArgInfo::getDirect());`).
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
    // NOTE(review): the continuation line was lost in extraction (presumably
    // `CGCXXABI::RAA_DirectInMemory);`).
  }

  // Empty records are always ignored on Darwin, but actually passed in C++ mode
  // elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with size
    // 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadic;
  // In variadic functions on Windows, all composite types are treated alike,
  // no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    if (Kind != AArch64ABIKind::AAPCS)
      // NOTE(review): the start of this return was lost in extraction
      // (presumably `return ABIArgInfo::getDirect(`).
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise
    // set it to 8 according to the AAPCS64 document.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    // NOTE(review): the start of this return was lost in extraction
    // (presumably `return ABIArgInfo::getDirect(`).
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    // NOTE(review): the start of this return was lost in extraction
    // (presumably `return ABIArgInfo::getDirect(`).
        Size == Alignment ? BaseTy
        : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
400
// Classify a return value per the selected AArch64 ABI variant.
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Fixed-length SVE vectors are returned as scalable vectors.
  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return coerceIllegalVector(RetTy);
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    // _BitInt wider than 128 bits is returned indirectly.
    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    // Darwin extends small integer returns; plain AAPCS does not.
    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
    // NOTE(review): the closing line of this conditional was lost in
    // extraction (presumably `: ABIArgInfo::getDirect());`).
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }

    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in lower bits of a 64-bit register for LE,
      // and in higher bits for BE. However, integer types are always returned
      // in lower bits for both LE and BE, and they are not rounded up to
      // 64-bits. We can skip rounding up of composite types for LE, but not for
      // BE, otherwise composite types will be indistinguishable from integer
      // types.
      // NOTE(review): the start of this return was lost in extraction
      // (presumably `return ABIArgInfo::getDirect(`).
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
475
476/// isIllegalVectorType - check whether the vector type is legal for AArch64.
477bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
478 if (const VectorType *VT = Ty->getAs<VectorType>()) {
479 // Check whether VT is a fixed-length SVE vector. These types are
480 // represented as scalable vectors in function args/return and must be
481 // coerced from fixed vectors.
482 if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
483 VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
484 return true;
485
486 // Check whether VT is legal.
487 unsigned NumElements = VT->getNumElements();
488 uint64_t Size = getContext().getTypeSize(VT);
489 // NumElements should be power of 2.
490 if (!llvm::isPowerOf2_32(NumElements))
491 return true;
492
493 // arm64_32 has to be compatible with the ARM logic here, which allows huge
494 // vectors for some reason.
495 llvm::Triple Triple = getTarget().getTriple();
496 if (Triple.getArch() == llvm::Triple::aarch64_32 &&
497 Triple.isOSBinFormatMachO())
498 return Size <= 32;
499
500 return Size != 64 && (Size != 128 || NumElements == 1);
501 }
502 return false;
503}
504
505bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
506 llvm::Type *EltTy,
507 unsigned NumElts) const {
508 if (!llvm::isPowerOf2_32(NumElts))
509 return false;
510 if (VectorSize.getQuantity() != 8 &&
511 (VectorSize.getQuantity() != 16 || NumElts == 1))
512 return false;
513 return true;
514}
515
516bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
517 // For the soft-float ABI variant, no types are considered to be homogeneous
518 // aggregates.
519 if (Kind == AArch64ABIKind::AAPCSSoft)
520 return false;
521
522 // Homogeneous aggregates for AAPCS64 must have base types of a floating
523 // point type or a short-vector type. This is the same as the 32-bit ABI,
524 // but with the difference that any floating-point type is allowed,
525 // including __fp16.
526 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
527 if (BT->isFloatingPoint())
528 return true;
529 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
530 unsigned VecSize = getContext().getTypeSize(VT);
531 if (VecSize == 64 || VecSize == 128)
532 return true;
533 }
534 return false;
535}
536
537bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
538 uint64_t Members) const {
539 return Members <= 4;
540}
541
542bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
543 const {
544 // AAPCS64 says that the rule for whether something is a homogeneous
545 // aggregate is applied to the output of the data layout decision. So
546 // anything that doesn't affect the data layout also does not affect
547 // homogeneity. In particular, zero-length bitfields don't stop a struct
548 // being homogeneous.
549 return true;
550}
551
// Expand va_arg for the generic AAPCS64 va_list (struct with __stack,
// __gr_top, __vr_top, __gr_offs, __vr_offs): decide at runtime whether the
// argument was passed in the GP/FP register save area or on the stack, and
// produce the address to load it from.
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AArch64ABIKind Kind) const {
  ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
  // NOTE(review): the final argument line of this call was lost in
  // extraction (presumably the enclosing function's calling convention) —
  // verify against upstream.
  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore()) {
    uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
    CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  bool IsIndirect = AI.isIndirect();

  // Type as it actually appears in the register/stack slot (a pointer when
  // passed indirectly, otherwise possibly a coerced type).
  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  // An array coercion means the value spans NumRegs registers.
  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = Kind != AArch64ABIKind::AAPCSSoft &&
               (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  // void *__stack;
  // void *__gr_top;
  // void *__vr_top;
  // int __gr_offs;
  // int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  // The fact that this is done unconditionally reflects the fact that
  // allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg
  CGF.Builder.CreateStore(NewStack, stack_p);

  // Big-endian: small scalars are right-aligned within their 8-byte slot.
  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  // Indirectly-passed values need one extra load to reach the object itself.
  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                   TyAlign);

  return ResAddr;
}
808
809Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
810 CodeGenFunction &CGF) const {
811 // The backend's lowering doesn't support va_arg for aggregates or
812 // illegal vector types. Lower VAArg here for these cases and use
813 // the LLVM va_arg instruction for everything else.
814 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
815 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
816
817 uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
818 CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
819
820 // Empty records are ignored for parameter passing purposes.
821 if (isEmptyRecord(getContext(), Ty, true))
822 return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
823 CGF.ConvertTypeForMem(Ty), SlotSize);
824
825 // The size of the actual thing passed, which might end up just
826 // being a pointer for indirect types.
827 auto TyInfo = getContext().getTypeInfoInChars(Ty);
828
829 // Arguments bigger than 16 bytes which aren't homogeneous
830 // aggregates should be passed indirectly.
831 bool IsIndirect = false;
832 if (TyInfo.Width.getQuantity() > 16) {
833 const Type *Base = nullptr;
834 uint64_t Members = 0;
835 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
836 }
837
838 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
839 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
840}
841
// Win64 va_arg: uses the generic void*-based expansion.
Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
  // NOTE(review): two argument lines of this call were lost in extraction
  // (presumably the type-info and the 8-byte slot size, mirroring the call
  // in EmitDarwinVAArg) — verify against upstream.
                          /*allowHigherAlign*/ false);
}
855
// Returns true if F's prototype carries the SME streaming-compatible
// attribute bit.
static bool isStreamingCompatible(const FunctionDecl *F) {
  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
    return T->getAArch64SMEAttributes() &
    // NOTE(review): the mask operand line was lost in extraction (presumably
    // `FunctionType::SME_PStateSMCompatibleMask;`) — verify upstream.
  return false;
}
862
// Report an error if an argument or return value of type Ty would need to be
// passed in a floating-point register.
// NOTE(review): the first line of this function's signature was lost in
// extraction (presumably `static void diagnoseIfNeedsFPReg(DiagnosticsEngine
// &Diags,`) — verify against upstream.
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  // FP scalars, vectors and homogeneous aggregates all require FP registers
  // under a hard-float ABI.
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(D->getLocation(), diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}
877
878// If we are using a hard-float ABI, but do not have floating point registers,
879// then report an error for any function arguments or returns which would be
880// passed in floating-pint registers.
881void AArch64TargetCodeGenInfo::checkFunctionABI(
882 CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
883 const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
885
886 if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
888 FuncDecl->getReturnType(), FuncDecl);
889 for (ParmVarDecl *PVD : FuncDecl->parameters()) {
890 diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, PVD->getType(),
891 PVD);
892 }
893 }
894}
895
896void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
897 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
898 const FunctionDecl *Callee) const {
899 if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
900 return;
901
902 bool CallerIsStreaming =
903 IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
904 bool CalleeIsStreaming =
905 IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
906 bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
907 bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);
908
909 if (!CalleeIsStreamingCompatible &&
910 (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible))
911 CGM.getDiags().Report(CallLoc,
912 diag::err_function_always_inline_attribute_mismatch)
913 << Caller->getDeclName() << Callee->getDeclName() << "streaming";
914 if (auto *NewAttr = Callee->getAttr<ArmNewAttr>())
915 if (NewAttr->isNewZA())
916 CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
917 << Callee->getDeclName();
918}
919
920// If the target does not have floating-point registers, but we are using a
921// hard-float ABI, there is no way to pass floating-point, vector or HFA values
922// to functions, so we report an error.
923void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
924 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
925 const FunctionDecl *Callee, const CallArgList &Args,
926 QualType ReturnType) const {
927 const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
929
930 if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
931 return;
932
933 diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
934 Caller);
935
936 for (const CallArg &Arg : Args)
937 diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
938 Caller);
939}
940
941void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
942 SourceLocation CallLoc,
943 const FunctionDecl *Caller,
944 const FunctionDecl *Callee,
945 const CallArgList &Args,
946 QualType ReturnType) const {
947 checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
948 checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
949}
950
951void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
952 unsigned Index,
953 raw_ostream &Out) const {
954 appendAttributeMangling(Attr->getFeatureStr(Index), Out);
955}
956
957void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
958 raw_ostream &Out) const {
959 if (AttrStr == "default") {
960 Out << ".default";
961 return;
962 }
963
964 Out << "._";
966 AttrStr.split(Features, "+");
967 for (auto &Feat : Features)
968 Feat = Feat.trim();
969
970 llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
971 return LHS.compare(RHS) < 0;
972 });
973
974 llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
975 for (auto &Feat : Features)
976 if (auto Ext = llvm::AArch64::parseArchExtension(Feat))
977 if (UniqueFeats.insert(Ext->Name).second)
978 Out << 'M' << Ext->Name;
979}
980
981std::unique_ptr<TargetCodeGenInfo>
983 AArch64ABIKind Kind) {
984 return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
985}
986
987std::unique_ptr<TargetCodeGenInfo>
989 AArch64ABIKind K) {
990 return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
991}
static bool isStreamingCompatible(const FunctionDecl *F)
Definition: AArch64.cpp:856
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags, const StringRef ABIName, const AArch64ABIInfo &ABIInfo, const QualType &Ty, const NamedDecl *D)
Definition: AArch64.cpp:865
TypeInfoChars getTypeInfoInChars(const Type *T) const
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:757
Attr - This represents one attribute.
Definition: Attr.h:42
A fixed int type of a specified bitwidth.
Definition: Type.h:7242
This class is used for builtin types like 'int'.
Definition: Type.h:2981
Kind getKind() const
Definition: Type.h:3023
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition: CharUnits.h:201
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static ABIArgInfo getIgnore()
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
Definition: ABIInfo.h:45
virtual bool allowBFloatArgsAndRet() const
Definition: ABIInfo.h:56
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
Definition: ABIInfo.cpp:42
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate.
Definition: ABIInfo.cpp:61
CodeGen::CGCXXABI & getCXXABI() const
Definition: ABIInfo.cpp:18
ASTContext & getContext() const
Definition: ABIInfo.cpp:20
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
Definition: ABIInfo.cpp:47
virtual void appendAttributeMangling(TargetAttr *Attr, raw_ostream &Out) const
Definition: ABIInfo.cpp:187
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
Definition: ABIInfo.cpp:51
const TargetInfo & getTarget() const
Definition: ABIInfo.cpp:30
virtual bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const
Definition: ABIInfo.cpp:56
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
llvm::ConstantInt * getSize(CharUnits N)
Definition: CGBuilder.h:99
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
RecordArgABI
Specify how one should pass an argument of a record type.
Definition: CGCXXABI.h:150
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:158
CGFunctionInfo - Class to encapsulate the information about a function definition.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:258
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
llvm::Type * ConvertTypeForMem(QualType T)
const TargetInfo & getTarget() const
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Type * ConvertType(QualType T)
const CGFunctionInfo * CurFnInfo
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
Target specific hooks for defining how a type should be passed or returned from functions with one of...
Definition: ABIInfo.h:128
virtual bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy, unsigned NumElts) const
Returns true if the given vector type is legal from Swift's calling convention perspective.
Definition: ABIInfo.cpp:278
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition: TargetInfo.h:46
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
Definition: TargetInfo.h:194
virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const
Retrieve the address of a function to call immediately before calling objc_retainAutoreleasedReturnVa...
Definition: TargetInfo.h:206
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
Definition: TargetInfo.h:94
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
Definition: TargetInfo.h:75
virtual void checkFunctionABI(CodeGenModule &CGM, const FunctionDecl *Decl) const
Any further codegen related checks that need to be done on a function signature in a target specific ...
Definition: TargetInfo.h:89
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:179
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition: TargetInfo.h:123
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:579
SourceLocation getLocation() const
Definition: DeclBase.h:445
Concrete class used by the front-end to report problems and issues.
Definition: Diagnostic.h:192
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1547
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:5575
Represents a function declaration or definition.
Definition: Decl.h:1971
QualType getReturnType() const
Definition: Decl.h:2754
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2683
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4656
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
Definition: Type.h:5094
@ SME_PStateSMCompatibleMask
Definition: Type.h:4518
This represents a decl that may have a name.
Definition: Decl.h:249
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:315
Represents a parameter to a function.
Definition: Decl.h:1761
A (possibly-)qualified type.
Definition: Type.h:940
Encodes a location in the source.
Exposes information about the current target.
Definition: TargetInfo.h:218
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1324
virtual ParsedTargetAttr parseTargetAttr(StringRef Str) const
Definition: TargetInfo.cpp:548
virtual bool hasBFloat16Type() const
Determine whether the _BFloat16 type is supported on this target.
Definition: TargetInfo.h:699
virtual bool hasFeature(StringRef Feature) const
Determine whether the given target has the given feature.
Definition: TargetInfo.h:1472
virtual bool validateBranchProtection(StringRef Spec, StringRef Arch, BranchProtectionInfo &BPI, StringRef &Err) const
Determine if this TargetInfo supports the given branch protection specification.
Definition: TargetInfo.h:1448
The base class of the type hierarchy.
Definition: Type.h:1813
bool isVoidType() const
Definition: Type.h:7905
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8193
bool isVectorType() const
Definition: Type.h:7718
bool isFloatingType() const
Definition: Type.cpp:2238
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8126
QualType getType() const
Definition: Decl.h:717
Represents a GCC generic vector type.
Definition: Type.h:3969
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, const ABIArgInfo &AI)
Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
Definition: ABIInfoImpl.cpp:79
bool isAggregateTypeForABI(QualType T)
std::unique_ptr< TargetCodeGenInfo > createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
Definition: AArch64.cpp:982
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K)
Definition: AArch64.cpp:988
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1393
The JSON file list parser is used to communicate input to InstallAPI.
@ CPlusPlus
Definition: LangStandard.h:55
const FunctionProtoType * T
bool IsArmStreamingFunction(const FunctionDecl *FD, bool IncludeLocallyStreaming)
Returns whether the given FunctionDecl has an __arm[_locally]_streaming attribute.
Definition: Decl.cpp:5760
unsigned long uint64_t
Definition: Format.h:5428
#define true
Definition: stdbool.h:25
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Contains information gathered from parsing the contents of TargetAttr.
Definition: TargetInfo.h:57
LangOptions::SignReturnAddressScopeKind SignReturnAddr
Definition: TargetInfo.h:1409
LangOptions::SignReturnAddressKeyKind SignKey
Definition: TargetInfo.h:1410
const char * getSignReturnAddrStr() const
Definition: TargetInfo.h:1417