1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CGRecordLayout.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/DeclObjC.h"
29#include "clang/AST/NSAPI.h"
34#include "llvm/ADT/STLExtras.h"
35#include "llvm/ADT/ScopeExit.h"
36#include "llvm/ADT/StringExtras.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/MDBuilder.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/Support/ConvertUTF.h"
43#include "llvm/Support/Endian.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/Path.h"
46#include "llvm/Support/xxhash.h"
47#include "llvm/Transforms/Utils/SanitizerStats.h"
48
49#include <optional>
50#include <string>
51
52using namespace clang;
53using namespace CodeGen;
54
55namespace clang {
56// TODO: Introduce frontend options to enable this per sanitizer, similar to
57// `-fsanitize-trap`.
58llvm::cl::opt<bool> ClSanitizeGuardChecks(
59 "ubsan-guard-checks", llvm::cl::Optional,
60 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
61} // namespace clang
62
63//===--------------------------------------------------------------------===//
64// Defines for metadata
65//===--------------------------------------------------------------------===//
66
67// These values must be kept the same as the ones in the UBSan runtime library.
68enum VariableTypeDescriptorKind : uint16_t {
69 /// An integer type.
70 TK_Integer = 0x0000,
71 /// A floating-point type.
72 TK_Float = 0x0001,
73 /// An _BitInt(N) type.
74 TK_BitInt = 0x0002,
75 /// Any other type. The value representation is unspecified.
76 TK_Unknown = 0xffff
77};
78
79//===--------------------------------------------------------------------===//
80// Miscellaneous Helper Methods
81//===--------------------------------------------------------------------===//
82
83/// CreateTempAlloca - This creates an alloca and inserts it into the entry
84/// block.
85RawAddress
86CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
87 const Twine &Name,
88 llvm::Value *ArraySize) {
89 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
90 Alloca->setAlignment(Align.getAsAlign());
91 return RawAddress(Alloca, Ty, Align, KnownNonNull);
92}
93
94/// CreateTempAlloca - This creates an alloca and inserts it into the entry
95/// block. The alloca is cast to the default address space if necessary.
96RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
97 const Twine &Name,
98 llvm::Value *ArraySize,
99 RawAddress *AllocaAddr) {
100 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
101 if (AllocaAddr)
102 *AllocaAddr = Alloca;
103 llvm::Value *V = Alloca.getPointer();
104 // Alloca always returns a pointer in alloca address space, which may
105 // be different from the type defined by the language. For example,
106 // in C++ the auto variables are in the default address space. Therefore
107 // cast alloca to the default address space when necessary.
108 if (getASTAllocaAddressSpace() != LangAS::Default) {
109 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
110 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
111 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
112 // otherwise alloca is inserted at the current insertion point of the
113 // builder.
114 if (!ArraySize)
115 Builder.SetInsertPoint(getPostAllocaInsertPoint());
118 Builder.getPtrTy(DestAddrSpace), /*non-null*/ true);
119 }
120
121 return RawAddress(V, Ty, Align, KnownNonNull);
122}
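
// Illustrative note (not part of the original file): on a target whose alloca
// address space is not the default one (for example AMDGPU, where allocas live
// in addrspace(5)), a temporary created through the helper above is emitted
// roughly as
//
//   %tmp = alloca i32, align 4, addrspace(5)
//   %tmp.ascast = addrspacecast ptr addrspace(5) %tmp to ptr
//
// so that the rest of IRGen can treat the temporary as a default address space
// pointer.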
123
124/// CreateTempAlloca - This creates an alloca and inserts it into the entry
125/// block if \p ArraySize is nullptr, otherwise inserts it at the current
126/// insertion point of the builder.
127llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
128 const Twine &Name,
129 llvm::Value *ArraySize) {
130 llvm::AllocaInst *Alloca;
131 if (ArraySize)
132 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
133 else
134 Alloca =
135 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
136 ArraySize, Name, AllocaInsertPt->getIterator());
137 if (Allocas) {
138 Allocas->Add(Alloca);
139 }
140 return Alloca;
141}
142
143/// CreateDefaultAlignTempAlloca - This creates an alloca with the
144/// default alignment of the corresponding LLVM type, which is *not*
145/// guaranteed to be related in any way to the expected alignment of
146/// an AST type that might have been lowered to Ty.
147RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
148 const Twine &Name) {
149 CharUnits Align =
150 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
151 return CreateTempAlloca(Ty, Align, Name);
152}
153
156 return CreateTempAlloca(ConvertType(Ty), Align, Name);
157}
158
159RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
160 RawAddress *Alloca) {
161 // FIXME: Should we prefer the preferred type alignment here?
162 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
163}
164
165RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
166 const Twine &Name,
167 RawAddress *Alloca) {
168 RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
169 /*ArraySize=*/nullptr, Alloca);
170
171 if (Ty->isConstantMatrixType()) {
172 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
173 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
174 ArrayTy->getNumElements());
175
176 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
177 KnownNonNull);
178 }
179 return Result;
180}
181
182RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
183 CharUnits Align,
184 const Twine &Name) {
185 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
186}
187
188RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
189 const Twine &Name) {
190 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
191 Name);
192}
193
194/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
195/// expression and compare the result against zero, returning an Int1Ty value.
196llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
197 PGO.setCurrentStmt(E);
198 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
199 llvm::Value *MemPtr = EmitScalarExpr(E);
200 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
201 }
202
203 QualType BoolTy = getContext().BoolTy;
204 SourceLocation Loc = E->getExprLoc();
205 CGFPOptionsRAII FPOptsRAII(*this, E);
206 if (!E->getType()->isAnyComplexType())
207 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
208
209 return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
210 Loc);
211}
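
// Illustrative example (not part of the original file): for `int *p; if (p)`,
// the condition goes through EvaluateExprAsBool and becomes roughly
//
//   %tobool = icmp ne ptr %p, null
//   br i1 %tobool, label %if.then, label %if.end
//
// whereas a member pointer is tested through the C++ ABI hook above, since a
// null member pointer need not be an all-zero value (e.g. the Itanium ABI
// represents a null pointer to data member as -1).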
212
213/// EmitIgnoredExpr - Emit code to compute the specified expression,
214/// ignoring the result.
215void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
216 if (E->isPRValue())
217 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
218
219 // If this is a bitfield-resulting conditional operator, we can special-case
220 // emit it. The normal 'EmitLValue' version of this is particularly
221 // difficult to codegen for, since creating a single "LValue" for two
222 // differently sized arguments here is not particularly doable.
223 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
224 E->IgnoreParens())) {
225 if (CondOp->getObjectKind() == OK_BitField)
226 return EmitIgnoredConditionalOperator(CondOp);
227 }
228
229 // Just emit it as an l-value and drop the result.
230 EmitLValue(E);
231}
232
233/// EmitAnyExpr - Emit code to compute the specified expression which
234/// can have any type. The result is returned as an RValue struct.
235/// If this is an aggregate expression, AggSlot indicates where the
236/// result should be returned.
237RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
238 AggValueSlot aggSlot,
239 bool ignoreResult) {
240 switch (getEvaluationKind(E->getType())) {
241 case TEK_Scalar:
242 return RValue::get(EmitScalarExpr(E, ignoreResult));
243 case TEK_Complex:
244 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
245 case TEK_Aggregate:
246 if (!ignoreResult && aggSlot.isIgnored())
247 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
248 EmitAggExpr(E, aggSlot);
249 return aggSlot.asRValue();
250 }
251 llvm_unreachable("bad evaluation kind");
252}
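
// Illustrative example (not part of the original file): the three evaluation
// kinds map onto source types roughly as
//
//   int i = f();              // TEK_Scalar: a single llvm::Value*
//   _Complex double z = g();  // TEK_Complex: a (real, imag) pair of values
//   struct Big b = h();       // TEK_Aggregate: evaluated into a memory slot
//
// which is why the aggregate case above may need to materialize a temporary
// slot when the caller passed an ignored AggValueSlot.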
253
254/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
255/// always be accessible even if no aggregate location is provided.
256RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
257 AggValueSlot AggSlot = AggValueSlot::ignored();
258
259 if (hasAggregateEvaluationKind(E->getType()))
260 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
261 return EmitAnyExpr(E, AggSlot);
262}
263
264/// EmitAnyExprToMem - Evaluate an expression into a given memory
265/// location.
266void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
267 Address Location,
268 Qualifiers Quals,
269 bool IsInit) {
270 // FIXME: This function should take an LValue as an argument.
271 switch (getEvaluationKind(E->getType())) {
272 case TEK_Complex:
273 EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
274 /*isInit*/ false);
275 return;
276
277 case TEK_Aggregate: {
278 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
283 return;
284 }
285
286 case TEK_Scalar: {
287 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
288 LValue LV = MakeAddrLValue(Location, E->getType());
289 EmitStoreThroughLValue(RV, LV);
290 return;
291 }
292 }
293 llvm_unreachable("bad evaluation kind");
294}
295
297 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
298 QualType Type = LV.getType();
299 switch (getEvaluationKind(Type)) {
300 case TEK_Complex:
301 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
302 return;
303 case TEK_Aggregate:
307 AggValueSlot::MayOverlap, IsZeroed));
308 return;
309 case TEK_Scalar:
310 if (LV.isSimple())
311 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
312 else
313 EmitStoreThroughLValue(RValue::get(EmitScalarExpr(E)), LV);
314 return;
315 }
316 llvm_unreachable("bad evaluation kind");
317}
318
319static void
320pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
321 const Expr *E, Address ReferenceTemporary) {
322 // Objective-C++ ARC:
323 // If we are binding a reference to a temporary that has ownership, we
324 // need to perform retain/release operations on the temporary.
325 //
326 // FIXME: This should be looking at E, not M.
327 if (auto Lifetime = M->getType().getObjCLifetime()) {
328 switch (Lifetime) {
329 case Qualifiers::OCL_None:
330 case Qualifiers::OCL_ExplicitNone:
331 // Carry on to normal cleanup handling.
332 break;
333
334 case Qualifiers::OCL_Autoreleasing:
335 // Nothing to do; cleaned up by an autorelease pool.
336 return;
337
338 case Qualifiers::OCL_Strong:
339 case Qualifiers::OCL_Weak:
340 switch (StorageDuration Duration = M->getStorageDuration()) {
341 case SD_Static:
342 // Note: we intentionally do not register a cleanup to release
343 // the object on program termination.
344 return;
345
346 case SD_Thread:
347 // FIXME: We should probably register a cleanup in this case.
348 return;
349
350 case SD_Automatic:
354 if (Lifetime == Qualifiers::OCL_Strong) {
355 const ValueDecl *VD = M->getExtendingDecl();
356 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
357 VD->hasAttr<ObjCPreciseLifetimeAttr>();
361 } else {
362 // __weak objects always get EH cleanups; otherwise, exceptions
363 // could cause really nasty crashes instead of mere leaks.
366 }
367 if (Duration == SD_FullExpression)
368 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
369 M->getType(), *Destroy,
371 else
372 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
373 M->getType(),
374 *Destroy, CleanupKind & EHCleanup);
375 return;
376
377 case SD_Dynamic:
378 llvm_unreachable("temporary cannot have dynamic storage duration");
379 }
380 llvm_unreachable("unknown storage duration");
381 }
382 }
383
384 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
385 if (const RecordType *RT =
387 // Get the destructor for the reference temporary.
388 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
389 if (!ClassDecl->hasTrivialDestructor())
390 ReferenceTemporaryDtor = ClassDecl->getDestructor();
391 }
392
393 if (!ReferenceTemporaryDtor)
394 return;
395
396 // Call the destructor for the temporary.
397 switch (M->getStorageDuration()) {
398 case SD_Static:
399 case SD_Thread: {
400 llvm::FunctionCallee CleanupFn;
401 llvm::Constant *CleanupArg;
402 if (E->getType()->isArrayType()) {
404 ReferenceTemporary, E->getType(),
406 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
407 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
408 } else {
409 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
410 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
411 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
412 }
414 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
415 break;
416 }
417
419 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
421 CGF.getLangOpts().Exceptions);
422 break;
423
424 case SD_Automatic:
426 ReferenceTemporary, E->getType(),
428 CGF.getLangOpts().Exceptions);
429 break;
430
431 case SD_Dynamic:
432 llvm_unreachable("temporary cannot have dynamic storage duration");
433 }
434}
435
438 const Expr *Inner,
439 RawAddress *Alloca = nullptr) {
440 auto &TCG = CGF.getTargetHooks();
441 switch (M->getStorageDuration()) {
443 case SD_Automatic: {
444 // If we have a constant temporary array or record try to promote it into a
445 // constant global under the same rules a normal constant would've been
446 // promoted. This is easier on the optimizer and generally emits fewer
447 // instructions.
448 QualType Ty = Inner->getType();
449 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
450 (Ty->isArrayType() || Ty->isRecordType()) &&
451 Ty.isConstantStorage(CGF.getContext(), true, false))
452 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
453 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
454 auto *GV = new llvm::GlobalVariable(
455 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
456 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
457 llvm::GlobalValue::NotThreadLocal,
459 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
460 GV->setAlignment(alignment.getAsAlign());
461 llvm::Constant *C = GV;
462 if (AS != LangAS::Default)
463 C = TCG.performAddrSpaceCast(
464 CGF.CGM, GV, AS, LangAS::Default,
465 llvm::PointerType::get(
466 CGF.getLLVMContext(),
468 // FIXME: Should we put the new global into a COMDAT?
469 return RawAddress(C, GV->getValueType(), alignment);
470 }
471 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
472 }
473 case SD_Thread:
474 case SD_Static:
475 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
476
477 case SD_Dynamic:
478 llvm_unreachable("temporary can't have dynamic storage duration");
479 }
480 llvm_unreachable("unknown storage duration");
481}
482
483/// Helper method to check if the underlying ABI is AAPCS
484static bool isAAPCS(const TargetInfo &TargetInfo) {
485 return TargetInfo.getABI().starts_with("aapcs");
486}
487
490 const Expr *E = M->getSubExpr();
491
492 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
493 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
494 "Reference should never be pseudo-strong!");
495
496 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
497 // as that will cause the lifetime adjustment to be lost for ARC
498 auto ownership = M->getType().getObjCLifetime();
499 if (ownership != Qualifiers::OCL_None &&
500 ownership != Qualifiers::OCL_ExplicitNone) {
502 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
503 llvm::Type *Ty = ConvertTypeForMem(E->getType());
504 Object = Object.withElementType(Ty);
505
506 // createReferenceTemporary will promote the temporary to a global with a
507 // constant initializer if it can. It can only do this to a value of
508 // ARC-manageable type if the value is global and therefore "immune" to
509 // ref-counting operations. Therefore we have no need to emit either a
510 // dynamic initialization or a cleanup and we can just return the address
511 // of the temporary.
512 if (Var->hasInitializer())
513 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
514
515 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
516 }
517 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
519
520 switch (getEvaluationKind(E->getType())) {
521 default: llvm_unreachable("expected scalar or aggregate expression");
522 case TEK_Scalar:
523 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
524 break;
525 case TEK_Aggregate: {
532 break;
533 }
534 }
535
536 pushTemporaryCleanup(*this, M, E, Object);
537 return RefTempDst;
538 }
539
542 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
543
544 for (const auto &Ignored : CommaLHSs)
545 EmitIgnoredExpr(Ignored);
546
547 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
548 if (opaque->getType()->isRecordType()) {
549 assert(Adjustments.empty());
550 return EmitOpaqueValueLValue(opaque);
551 }
552 }
553
554 // Create and initialize the reference temporary.
555 RawAddress Alloca = Address::invalid();
556 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
557 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
558 Object.getPointer()->stripPointerCasts())) {
559 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
560 Object = Object.withElementType(TemporaryType);
561 // If the temporary is a global and has a constant initializer or is a
562 // constant temporary that we promoted to a global, we may have already
563 // initialized it.
564 if (!Var->hasInitializer()) {
565 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
566 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
567 }
568 } else {
569 switch (M->getStorageDuration()) {
570 case SD_Automatic:
571 if (auto *Size = EmitLifetimeStart(
572 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
573 Alloca.getPointer())) {
574 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
575 Alloca, Size);
576 }
577 break;
578
579 case SD_FullExpression: {
580 if (!ShouldEmitLifetimeMarkers)
581 break;
582
583 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
584 // marker. Instead, start the lifetime of a conditional temporary earlier
585 // so that it's unconditional. Don't do this with sanitizers which need
586 // more precise lifetime marks. However, when inside an "await.suspend"
587 // block, we should always avoid a conditional cleanup because it creates a
588 // boolean marker that lives across await_suspend, which can destroy the
589 // coroutine frame.
590 ConditionalEvaluation *OldConditional = nullptr;
591 CGBuilderTy::InsertPoint OldIP;
593 ((!SanOpts.has(SanitizerKind::HWAddress) &&
594 !SanOpts.has(SanitizerKind::Memory) &&
595 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
596 inSuspendBlock())) {
597 OldConditional = OutermostConditional;
598 OutermostConditional = nullptr;
599
600 OldIP = Builder.saveIP();
601 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
602 Builder.restoreIP(CGBuilderTy::InsertPoint(
603 Block, llvm::BasicBlock::iterator(Block->back())));
604 }
605
606 if (auto *Size = EmitLifetimeStart(
607 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
608 Alloca.getPointer())) {
609 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
610 Size);
611 }
612
613 if (OldConditional) {
614 OutermostConditional = OldConditional;
615 Builder.restoreIP(OldIP);
616 }
617 break;
618 }
619
620 default:
621 break;
622 }
623 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
624 }
625 pushTemporaryCleanup(*this, M, E, Object);
626
627 // Perform derived-to-base casts and/or field accesses, to get from the
628 // temporary object we created (and, potentially, for which we extended
629 // the lifetime) to the subobject we're binding the reference to.
630 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
631 switch (Adjustment.Kind) {
633 Object =
634 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
635 Adjustment.DerivedToBase.BasePath->path_begin(),
636 Adjustment.DerivedToBase.BasePath->path_end(),
637 /*NullCheckValue=*/ false, E->getExprLoc());
638 break;
639
642 LV = EmitLValueForField(LV, Adjustment.Field);
643 assert(LV.isSimple() &&
644 "materialized temporary field is not a simple lvalue");
645 Object = LV.getAddress();
646 break;
647 }
648
650 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
652 Adjustment.Ptr.MPT);
653 break;
654 }
655 }
656 }
657
658 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
659}
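
// Illustrative example (not part of the original file): the storage duration
// of a materialized temporary determines which cleanup is pushed above, e.g.
//
//   struct S { ~S(); };
//   void g(const S &);
//   const S &r = S();    // SD_Automatic: destroyed at the end of r's scope
//   void f() { g(S()); } // SD_FullExpression: destroyed at the end of the
//                        // full-expression containing the call
//
// while static/thread-duration temporaries register their destructor with the
// C++ ABI instead of pushing a normal cleanup.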
660
661RValue
663 // Emit the expression as an lvalue.
664 LValue LV = EmitLValue(E);
665 assert(LV.isSimple());
666 llvm::Value *Value = LV.getPointer(*this);
667
669 // C++11 [dcl.ref]p5 (as amended by core issue 453):
670 // If a glvalue to which a reference is directly bound designates neither
671 // an existing object or function of an appropriate type nor a region of
672 // storage of suitable size and alignment to contain an object of the
673 // reference's type, the behavior is undefined.
674 QualType Ty = E->getType();
676 }
677
678 return RValue::get(Value);
679}
680
681
682/// getAccessedFieldNo - Given an encoded value and a result number, return the
683/// input field number being accessed.
684unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
685 const llvm::Constant *Elts) {
686 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
687 ->getZExtValue();
688}
689
690static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
691 llvm::Value *Ptr) {
692 llvm::Value *A0 =
693 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
694 llvm::Value *A1 =
695 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
696 return Builder.CreateXor(Acc, A1);
697}
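
// Illustrative note (not part of the original file): the mix above is one
// finalization round in the style of splitmix64, applied to the values seen
// at run time:
//
//   A0   = Ptr * 0xbf58476d1ce4e5b9
//   A1   = A0 ^ (A0 >> 31)
//   Acc' = Acc ^ A1
//
// Scrambling the vptr this way before folding it into the accumulator keeps
// nearby pointer values from colliding in the vptr type cache used below.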
698
699bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
700 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
702}
703
704bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
706 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
707 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
710}
711
713 return SanOpts.has(SanitizerKind::Null) ||
714 SanOpts.has(SanitizerKind::Alignment) ||
715 SanOpts.has(SanitizerKind::ObjectSize) ||
716 SanOpts.has(SanitizerKind::Vptr);
717}
718
720 llvm::Value *Ptr, QualType Ty,
721 CharUnits Alignment,
722 SanitizerSet SkippedChecks,
723 llvm::Value *ArraySize) {
725 return;
726
727 // Don't check pointers outside the default address space. The null check
728 // isn't correct, the object-size check isn't supported by LLVM, and we can't
729 // communicate the addresses to the runtime handler for the vptr check.
730 if (Ptr->getType()->getPointerAddressSpace())
731 return;
732
733 // Don't check pointers to volatile data. The behavior here is implementation-
734 // defined.
735 if (Ty.isVolatileQualified())
736 return;
737
738 SanitizerScope SanScope(this);
739
741 Checks;
742 llvm::BasicBlock *Done = nullptr;
743
744 // Quickly determine whether we have a pointer to an alloca. It's possible
745 // to skip null checks, and some alignment checks, for these pointers. This
746 // can reduce compile-time significantly.
747 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
748
749 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
750 llvm::Value *IsNonNull = nullptr;
751 bool IsGuaranteedNonNull =
752 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
753 bool AllowNullPointers = isNullPointerAllowed(TCK);
754 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
755 !IsGuaranteedNonNull) {
756 // The glvalue must not be an empty glvalue.
757 IsNonNull = Builder.CreateIsNotNull(Ptr);
758
759 // The IR builder can constant-fold the null check if the pointer points to
760 // a constant.
761 IsGuaranteedNonNull = IsNonNull == True;
762
763 // Skip the null check if the pointer is known to be non-null.
764 if (!IsGuaranteedNonNull) {
765 if (AllowNullPointers) {
766 // When performing pointer casts, it's OK if the value is null.
767 // Skip the remaining checks in that case.
768 Done = createBasicBlock("null");
769 llvm::BasicBlock *Rest = createBasicBlock("not.null");
770 Builder.CreateCondBr(IsNonNull, Rest, Done);
771 EmitBlock(Rest);
772 } else {
773 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
774 }
775 }
776 }
777
778 if (SanOpts.has(SanitizerKind::ObjectSize) &&
779 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
780 !Ty->isIncompleteType()) {
782 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
783 if (ArraySize)
784 Size = Builder.CreateMul(Size, ArraySize);
785
786 // Degenerate case: new X[0] does not need an objectsize check.
787 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
788 if (!ConstantSize || !ConstantSize->isNullValue()) {
789 // The glvalue must refer to a large enough storage region.
790 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
791 // to check this.
792 // FIXME: Get object address space
793 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
794 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
795 llvm::Value *Min = Builder.getFalse();
796 llvm::Value *NullIsUnknown = Builder.getFalse();
797 llvm::Value *Dynamic = Builder.getFalse();
798 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
799 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
800 Checks.push_back(
801 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
802 }
803 }
804
805 llvm::MaybeAlign AlignVal;
806 llvm::Value *PtrAsInt = nullptr;
807
808 if (SanOpts.has(SanitizerKind::Alignment) &&
809 !SkippedChecks.has(SanitizerKind::Alignment)) {
810 AlignVal = Alignment.getAsMaybeAlign();
811 if (!Ty->isIncompleteType() && !AlignVal)
812 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
813 /*ForPointeeType=*/true)
814 .getAsMaybeAlign();
815
816 // The glvalue must be suitably aligned.
817 if (AlignVal && *AlignVal > llvm::Align(1) &&
818 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
819 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
820 llvm::Value *Align = Builder.CreateAnd(
821 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
822 llvm::Value *Aligned =
823 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
824 if (Aligned != True)
825 Checks.push_back(std::make_pair(Aligned, SanitizerKind::SO_Alignment));
826 }
827 }
828
829 if (Checks.size() > 0) {
830 llvm::Constant *StaticData[] = {
832 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
833 llvm::ConstantInt::get(Int8Ty, TCK)};
834 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
835 PtrAsInt ? PtrAsInt : Ptr);
836 }
837
838 // If possible, check that the vptr indicates that there is a subobject of
839 // type Ty at offset zero within this object.
840 //
841 // C++11 [basic.life]p5,6:
842 // [For storage which does not refer to an object within its lifetime]
843 // The program has undefined behavior if:
844 // -- the [pointer or glvalue] is used to access a non-static data member
845 // or call a non-static member function
846 if (SanOpts.has(SanitizerKind::Vptr) &&
847 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
848 // Ensure that the pointer is non-null before loading it. If there is no
849 // compile-time guarantee, reuse the run-time null check or emit a new one.
850 if (!IsGuaranteedNonNull) {
851 if (!IsNonNull)
852 IsNonNull = Builder.CreateIsNotNull(Ptr);
853 if (!Done)
854 Done = createBasicBlock("vptr.null");
855 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
856 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
857 EmitBlock(VptrNotNull);
858 }
859
860 // Compute a deterministic hash of the mangled name of the type.
861 SmallString<64> MangledName;
862 llvm::raw_svector_ostream Out(MangledName);
864 Out);
865
866 // Contained in NoSanitizeList based on the mangled type.
867 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
868 Out.str())) {
869 // Load the vptr, and mix it with TypeHash.
870 llvm::Value *TypeHash =
871 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
872
873 llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
874 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
875 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
876 Ty->getAsCXXRecordDecl(),
878 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
879
880 llvm::Value *Hash =
881 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
882 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
883
884 // Look the hash up in our cache.
885 const int CacheSize = 128;
886 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
887 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
888 "__ubsan_vptr_type_cache");
889 llvm::Value *Slot = Builder.CreateAnd(Hash,
890 llvm::ConstantInt::get(IntPtrTy,
891 CacheSize-1));
892 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
893 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
894 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
895 getPointerAlign());
896
897 // If the hash isn't in the cache, call a runtime handler to perform the
898 // hard work of checking whether the vptr is for an object of the right
899 // type. This will either fill in the cache and return, or produce a
900 // diagnostic.
901 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
902 llvm::Constant *StaticData[] = {
906 llvm::ConstantInt::get(Int8Ty, TCK)
907 };
908 llvm::Value *DynamicData[] = { Ptr, Hash };
909 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
910 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
911 DynamicData);
912 }
913 }
914
915 if (Done) {
916 Builder.CreateBr(Done);
917 EmitBlock(Done);
918 }
919}
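
// Illustrative example (not part of the original file): with -fsanitize=vptr,
// a member call through a possibly wrong dynamic type such as
//
//   struct A { virtual ~A(); };
//   struct B : A { void f(); };
//   void g(A *a) { static_cast<B *>(a)->f(); }
//
// loads the vptr of *a, mixes it with a hash of B's mangled name, probes
// __ubsan_vptr_type_cache, and only calls the
// __ubsan_handle_dynamic_type_cache_miss runtime handler on a cache miss.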
920
921llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
922 QualType EltTy) {
923 ASTContext &C = getContext();
924 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
925 if (!EltSize)
926 return nullptr;
927
928 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
929 if (!ArrayDeclRef)
930 return nullptr;
931
932 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
933 if (!ParamDecl)
934 return nullptr;
935
936 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
937 if (!POSAttr)
938 return nullptr;
939
940 // Don't load the size if it's a lower bound.
941 int POSType = POSAttr->getType();
942 if (POSType != 0 && POSType != 1)
943 return nullptr;
944
945 // Find the implicit size parameter.
946 auto PassedSizeIt = SizeArguments.find(ParamDecl);
947 if (PassedSizeIt == SizeArguments.end())
948 return nullptr;
949
950 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
951 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
952 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
953 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
954 C.getSizeType(), E->getExprLoc());
955 llvm::Value *SizeOfElement =
956 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
957 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
958}
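
// Illustrative example (not part of the original file): given a declaration
// like
//
//   void fill(int *buf __attribute__((pass_object_size(0))), int n);
//
// callers pass __builtin_object_size(buf, 0) as a hidden argument; the helper
// above finds that implicit parameter via SizeArguments and divides the byte
// size by sizeof(int) to recover an element count usable as an array bound.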
959
960/// If Base is known to point to the start of an array, return the length of
961/// that array. Return 0 if the length cannot be determined.
962static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
963 const Expr *Base,
964 QualType &IndexedType,
965 LangOptions::StrictFlexArraysLevelKind
966 StrictFlexArraysLevel) {
967 // For the vector indexing extension, the bound is the number of elements.
968 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
969 IndexedType = Base->getType();
970 return CGF.Builder.getInt32(VT->getNumElements());
971 }
972
973 Base = Base->IgnoreParens();
974
975 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
976 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
977 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
978 StrictFlexArraysLevel)) {
979 CodeGenFunction::SanitizerScope SanScope(&CGF);
980
981 IndexedType = CE->getSubExpr()->getType();
982 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
983 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
984 return CGF.Builder.getInt(CAT->getSize());
985
986 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
987 return CGF.getVLASize(VAT).NumElts;
988 // Ignore pass_object_size here. It's not applicable on decayed pointers.
989 }
990 }
991
992 CodeGenFunction::SanitizerScope SanScope(&CGF);
993
994 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
995 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
996 IndexedType = Base->getType();
997 return POS;
998 }
999
1000 return nullptr;
1001}
1002
1003namespace {
1004
1005/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1006/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1007///
1008/// p in p->a.b.c
1009///
1010/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1011/// looking for:
1012///
1013/// struct s {
1014/// struct s *ptr;
1015/// int count;
1016/// char array[] __attribute__((counted_by(count)));
1017/// };
1018///
1019/// If we have an expression like \p p->ptr->array[index], we want the
1020/// \p MemberExpr for \p p->ptr instead of \p p.
1021class StructAccessBase
1022 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1023 const RecordDecl *ExpectedRD;
1024
1025 bool IsExpectedRecordDecl(const Expr *E) const {
1026 QualType Ty = E->getType();
1027 if (Ty->isPointerType())
1028 Ty = Ty->getPointeeType();
1029 return ExpectedRD == Ty->getAsRecordDecl();
1030 }
1031
1032public:
1033 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1034
1035 //===--------------------------------------------------------------------===//
1036 // Visitor Methods
1037 //===--------------------------------------------------------------------===//
1038
1039 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1040 // horrors like this:
1041 //
1042 // struct S {
1043 // int x, y;
1044 // int blah[] __attribute__((counted_by(x)));
1045 // } s;
1046 //
1047 // int foo(int index, int val) {
1048 // int (S::*IHatePMDs)[] = &S::blah;
1049 // (s.*IHatePMDs)[index] = val;
1050 // }
1051
1052 const Expr *Visit(const Expr *E) {
1054 }
1055
1056 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1057
1058 // These are the types we expect to return (in order of most to least
1059 // likely):
1060 //
1061 // 1. DeclRefExpr - This is the expression for the base of the structure.
1062 // It's exactly what we want to build an access to the \p counted_by
1063 // field.
1064 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1065 // as the flexible array member's lexical enclosing \p RecordDecl. This
1066 // allows us to catch things like: "p->p->array"
1067 // 3. CompoundLiteralExpr - This is for people who create something
1068 // heretical like (struct foo has a flexible array member):
1069 //
1070 // (struct foo){ 1, 2 }.blah[idx];
1071 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1072 return IsExpectedRecordDecl(E) ? E : nullptr;
1073 }
1074 const Expr *VisitMemberExpr(const MemberExpr *E) {
1075 if (IsExpectedRecordDecl(E) && E->isArrow())
1076 return E;
1077 const Expr *Res = Visit(E->getBase());
1078 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1079 }
1080 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1081 return IsExpectedRecordDecl(E) ? E : nullptr;
1082 }
1083 const Expr *VisitCallExpr(const CallExpr *E) {
1084 return IsExpectedRecordDecl(E) ? E : nullptr;
1085 }
1086
1087 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1088 if (IsExpectedRecordDecl(E))
1089 return E;
1090 return Visit(E->getBase());
1091 }
1092 const Expr *VisitCastExpr(const CastExpr *E) {
1093 if (E->getCastKind() == CK_LValueToRValue)
1094 return IsExpectedRecordDecl(E) ? E : nullptr;
1095 return Visit(E->getSubExpr());
1096 }
1097 const Expr *VisitParenExpr(const ParenExpr *E) {
1098 return Visit(E->getSubExpr());
1099 }
1100 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1101 return Visit(E->getSubExpr());
1102 }
1103 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1104 return Visit(E->getSubExpr());
1105 }
1106};
1107
1108} // end anonymous namespace
1109
1111
1112static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
1113 const FieldDecl *Field,
1114 RecIndicesTy &Indices) {
1115 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1116 int64_t FieldNo = -1;
1117 for (const FieldDecl *FD : RD->fields()) {
1118 if (!Layout.containsFieldDecl(FD))
1119 // This could happen if the field has a struct type that's empty. I don't
1120 // know why either.
1121 continue;
1122
1123 FieldNo = Layout.getLLVMFieldNo(FD);
1124 if (FD == Field) {
1125 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1126 return true;
1127 }
1128
1129 QualType Ty = FD->getType();
1130 if (Ty->isRecordType()) {
1131 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1132 if (RD->isUnion())
1133 FieldNo = 0;
1134 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1135 return true;
1136 }
1137 }
1138 }
1139
1140 return false;
1141}
1142
1143llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
1144 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1145 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1146
1147 // Find the base struct expr (i.e. p in p->a.b.c.d).
1148 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1149 if (!StructBase || StructBase->HasSideEffects(getContext()))
1150 return nullptr;
1151
1152 llvm::Value *Res = nullptr;
1153 if (StructBase->getType()->isPointerType()) {
1154 LValueBaseInfo BaseInfo;
1155 TBAAAccessInfo TBAAInfo;
1156 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1157 Res = Addr.emitRawPointer(*this);
1158 } else if (StructBase->isLValue()) {
1159 LValue LV = EmitLValue(StructBase);
1160 Address Addr = LV.getAddress();
1161 Res = Addr.emitRawPointer(*this);
1162 } else {
1163 return nullptr;
1164 }
1165
1166 RecIndicesTy Indices;
1167 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1168 if (Indices.empty())
1169 return nullptr;
1170
1171 Indices.push_back(Builder.getInt32(0));
1172 return Builder.CreateInBoundsGEP(
1173 ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
1174 RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
1175}
1176
1177/// This method is typically called in contexts where we can't generate
1178/// side-effects, like in __builtin_dynamic_object_size. When finding
1179/// expressions, only choose those that have either already been emitted or can
1180/// be loaded without side-effects.
1181///
1182/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1183/// within the top-level struct.
1184/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1185llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
1186 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1187 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1188 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1189 getIntAlign(), "counted_by.load");
1190 return nullptr;
1191}
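
// Illustrative example (not part of the original file): for
//
//   struct buf {
//     int count;
//     char data[] __attribute__((counted_by(count)));
//   };
//
// an access to b->data[i] can use the GEP built above to load b->count, which
// then serves as the bound for -fsanitize=array-bounds checks and for
// __builtin_dynamic_object_size on the flexible array member.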
1192
1193void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1194 llvm::Value *Index, QualType IndexType,
1195 bool Accessed) {
1196 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1197 "should not be called unless adding bounds checks");
1198 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1199 getLangOpts().getStrictFlexArraysLevel();
1200 QualType IndexedType;
1201 llvm::Value *Bound =
1202 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1203
1204 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1205}
1206
1207void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1208 llvm::Value *Index,
1209 QualType IndexType,
1210 QualType IndexedType, bool Accessed) {
1211 if (!Bound)
1212 return;
1213
1214 SanitizerScope SanScope(this);
1215
1216 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1217 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1218 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1219
1220 llvm::Constant *StaticData[] = {
1221 EmitCheckSourceLocation(E->getExprLoc()),
1222 EmitCheckTypeDescriptor(IndexedType),
1223 EmitCheckTypeDescriptor(IndexType)
1224 };
1225 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1226 : Builder.CreateICmpULE(IndexVal, BoundVal);
1227 EmitCheck(std::make_pair(Check, SanitizerKind::SO_ArrayBounds),
1228 SanitizerHandler::OutOfBounds, StaticData, Index);
1229}
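
// Illustrative example (not part of the original file): with
// -fsanitize=array-bounds,
//
//   int a[4];
//   int get(int i) { return a[i]; }
//
// emits an unsigned compare of i against the constant bound 4 (ULT here,
// since the element is actually accessed; ULE when only the address is
// formed) and branches to the out-of-bounds runtime handler on failure.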
1230
1233 bool isInc, bool isPre) {
1235
1236 llvm::Value *NextVal;
1237 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1238 uint64_t AmountVal = isInc ? 1 : -1;
1239 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1240
1241 // Add the inc/dec to the real part.
1242 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1243 } else {
1244 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1245 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1246 if (!isInc)
1247 FVal.changeSign();
1248 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1249
1250 // Add the inc/dec to the real part.
1251 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1252 }
1253
1254 ComplexPairTy IncVal(NextVal, InVal.second);
1255
1256 // Store the updated result through the lvalue.
1257 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1258 if (getLangOpts().OpenMP)
1260 E->getSubExpr());
1261
1262 // If this is a postinc, return the value read from memory, otherwise use the
1263 // updated value.
1264 return isPre ? IncVal : InVal;
1265}
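
// Illustrative example (not part of the original file): for
// `_Complex double z; ++z;` only the real part changes, i.e. (re, im) becomes
// (re + 1.0, im); the postfix forms return the pair that was loaded before
// the update, matching the GNU extension semantics for ++/-- on complex
// values.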
1266
1267void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1268 CodeGenFunction *CGF) {
1269 // Bind VLAs in the cast type.
1270 if (CGF && E->getType()->isVariablyModifiedType())
1271 CGF->EmitVariablyModifiedType(E->getType());
1272
1273 if (CGDebugInfo *DI = getModuleDebugInfo())
1274 DI->EmitExplicitCastType(E->getType());
1275}
1276
1277//===----------------------------------------------------------------------===//
1278// LValue Expression Emission
1279//===----------------------------------------------------------------------===//
1280
1281static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
1282 TBAAAccessInfo *TBAAInfo,
1283 KnownNonNull_t IsKnownNonNull,
1284 CodeGenFunction &CGF) {
1285 // We allow this with ObjC object pointers because of fragile ABIs.
1286 assert(E->getType()->isPointerType() ||
1287 E->getType()->isObjCObjectPointerType());
1288 E = E->IgnoreParens();
1289
1290 // Casts:
1291 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1292 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1293 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1294
1295 switch (CE->getCastKind()) {
1296 // Non-converting casts (but not C's implicit conversion from void*).
1297 case CK_BitCast:
1298 case CK_NoOp:
1299 case CK_AddressSpaceConversion:
1300 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1301 if (PtrTy->getPointeeType()->isVoidType())
1302 break;
1303
1304 LValueBaseInfo InnerBaseInfo;
1305 TBAAAccessInfo InnerTBAAInfo;
1307 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1308 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1309 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1310
1311 if (isa<ExplicitCastExpr>(CE)) {
1312 LValueBaseInfo TargetTypeBaseInfo;
1313 TBAAAccessInfo TargetTypeTBAAInfo;
1315 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1316 if (TBAAInfo)
1317 *TBAAInfo =
1318 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1319 // If the source l-value is opaque, honor the alignment of the
1320 // casted-to type.
1321 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1322 if (BaseInfo)
1323 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1324 Addr.setAlignment(Align);
1325 }
1326 }
1327
1328 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1329 CE->getCastKind() == CK_BitCast) {
1330 if (auto PT = E->getType()->getAs<PointerType>())
1331 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1332 /*MayBeNull=*/true,
1334 CE->getBeginLoc());
1335 }
1336
1337 llvm::Type *ElemTy =
1339 Addr = Addr.withElementType(ElemTy);
1340 if (CE->getCastKind() == CK_AddressSpaceConversion)
1341 Addr = CGF.Builder.CreateAddrSpaceCast(
1342 Addr, CGF.ConvertType(E->getType()), ElemTy);
1343 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1344 CE->getType());
1345 }
1346 break;
1347
1348 // Array-to-pointer decay.
1349 case CK_ArrayToPointerDecay:
1350 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1351
1352 // Derived-to-base conversions.
1353 case CK_UncheckedDerivedToBase:
1354 case CK_DerivedToBase: {
1355 // TODO: Support accesses to members of base classes in TBAA. For now, we
1356 // conservatively pretend that the complete object is of the base class
1357 // type.
1358 if (TBAAInfo)
1359 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1361 CE->getSubExpr(), BaseInfo, nullptr,
1362 (KnownNonNull_t)(IsKnownNonNull ||
1363 CE->getCastKind() == CK_UncheckedDerivedToBase));
1364 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1365 return CGF.GetAddressOfBaseClass(
1366 Addr, Derived, CE->path_begin(), CE->path_end(),
1367 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1368 }
1369
1370 // TODO: Is there any reason to treat base-to-derived conversions
1371 // specially?
1372 default:
1373 break;
1374 }
1375 }
1376
1377 // Unary &.
1378 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1379 if (UO->getOpcode() == UO_AddrOf) {
1380 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1381 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1382 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1383 return LV.getAddress();
1384 }
1385 }
1386
1387 // std::addressof and variants.
1388 if (auto *Call = dyn_cast<CallExpr>(E)) {
1389 switch (Call->getBuiltinCallee()) {
1390 default:
1391 break;
1392 case Builtin::BIaddressof:
1393 case Builtin::BI__addressof:
1394 case Builtin::BI__builtin_addressof: {
1395 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1396 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1397 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1398 return LV.getAddress();
1399 }
1400 }
1401 }
1402
1403 // TODO: conditional operators, comma.
1404
1405 // Otherwise, use the alignment of the type.
1408 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1409}
1410
1411/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1412/// derive a more accurate bound on the alignment of the pointer.
1414 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1415 KnownNonNull_t IsKnownNonNull) {
1416 Address Addr =
1417 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1418 if (IsKnownNonNull && !Addr.isKnownNonNull())
1419 Addr.setKnownNonNull();
1420 return Addr;
1421}
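
// Illustrative example (not part of the original file): in
//
//   alignas(16) int x;
//   void f() { g(&x); }
//
// the argument &x is known to be 16-byte aligned because the unary & case
// above forwards the alignment of the declared variable rather than falling
// back to the natural alignment of 'int'.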
1422
1424 llvm::Value *V = RV.getScalarVal();
1425 if (auto MPT = T->getAs<MemberPointerType>())
1426 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1427 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1428}
1429
1430RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1431 if (Ty->isVoidType())
1432 return RValue::get(nullptr);
1433
1434 switch (getEvaluationKind(Ty)) {
1435 case TEK_Complex: {
1436 llvm::Type *EltTy =
1438 llvm::Value *U = llvm::UndefValue::get(EltTy);
1439 return RValue::getComplex(std::make_pair(U, U));
1440 }
1441
1442 // If this is a use of an undefined aggregate type, the aggregate must have an
1443 // identifiable address. Just because the contents of the value are undefined
1444 // doesn't mean that the address can't be taken and compared.
1445 case TEK_Aggregate: {
1446 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1447 return RValue::getAggregate(DestPtr);
1448 }
1449
1450 case TEK_Scalar:
1451 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1452 }
1453 llvm_unreachable("bad evaluation kind");
1454}
1455
1456RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1457 const char *Name) {
1458 ErrorUnsupported(E, Name);
1459 return GetUndefRValue(E->getType());
1460}
1461
1462LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1463 const char *Name) {
1464 ErrorUnsupported(E, Name);
1465 llvm::Type *ElTy = ConvertType(E->getType());
1466 llvm::Type *Ty = UnqualPtrTy;
1467 return MakeAddrLValue(
1468 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1469}
1470
1471bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1472 const Expr *Base = Obj;
1473 while (!isa<CXXThisExpr>(Base)) {
1474 // The result of a dynamic_cast can be null.
1475 if (isa<CXXDynamicCastExpr>(Base))
1476 return false;
1477
1478 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1479 Base = CE->getSubExpr();
1480 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1481 Base = PE->getSubExpr();
1482 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1483 if (UO->getOpcode() == UO_Extension)
1484 Base = UO->getSubExpr();
1485 else
1486 return false;
1487 } else {
1488 return false;
1489 }
1490 }
1491 return true;
1492}
1493
1494LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1495 LValue LV;
1496 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1497 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1498 else
1499 LV = EmitLValue(E);
1500 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1501 SanitizerSet SkippedChecks;
1502 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1503 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1504 if (IsBaseCXXThis)
1505 SkippedChecks.set(SanitizerKind::Alignment, true);
1506 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1507 SkippedChecks.set(SanitizerKind::Null, true);
1508 }
1509 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1510 }
1511 return LV;
1512}
1513
1514/// EmitLValue - Emit code to compute a designator that specifies the location
1515/// of the expression.
1516///
1517/// This can return one of two things: a simple address or a bitfield reference.
1518/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1519/// an LLVM pointer type.
1520///
1521/// If this returns a bitfield reference, nothing about the pointee type of the
1522/// LLVM value is known: For example, it may not be a pointer to an integer.
1523///
1524/// If this returns a normal address, and if the lvalue's C type is fixed size,
1525/// this method guarantees that the returned pointer type will point to an LLVM
1526/// type of the same size of the lvalue's type. If the lvalue has a variable
1527/// length type, this is not possible.
1528///
1529LValue CodeGenFunction::EmitLValue(const Expr *E,
1530 KnownNonNull_t IsKnownNonNull) {
1531 // Run with sufficient stack space to avoid deeply nested expressions
1532 // causing a stack overflow.
1533 LValue LV;
1534 CGM.runWithSufficientStackSpace(
1535 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1536
1537 if (IsKnownNonNull && !LV.isKnownNonNull())
1538 LV.setKnownNonNull();
1539 return LV;
1540}
1541
1543 const ASTContext &Ctx) {
1544 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1545 if (isa<OpaqueValueExpr>(SE))
1546 return SE->getType();
1547 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1548}
1549
1550LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1551 KnownNonNull_t IsKnownNonNull) {
1552 ApplyDebugLocation DL(*this, E);
1553 switch (E->getStmtClass()) {
1554 default: return EmitUnsupportedLValue(E, "l-value expression");
1555
1556 case Expr::ObjCPropertyRefExprClass:
1557 llvm_unreachable("cannot emit a property reference directly");
1558
1559 case Expr::ObjCSelectorExprClass:
1560 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1561 case Expr::ObjCIsaExprClass:
1562 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1563 case Expr::BinaryOperatorClass:
1564 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1565 case Expr::CompoundAssignOperatorClass: {
1566 QualType Ty = E->getType();
1567 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1568 Ty = AT->getValueType();
1569 if (!Ty->isAnyComplexType())
1570 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1571 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1572 }
1573 case Expr::CallExprClass:
1574 case Expr::CXXMemberCallExprClass:
1575 case Expr::CXXOperatorCallExprClass:
1576 case Expr::UserDefinedLiteralClass:
1577 return EmitCallExprLValue(cast<CallExpr>(E));
1578 case Expr::CXXRewrittenBinaryOperatorClass:
1579 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1580 IsKnownNonNull);
1581 case Expr::VAArgExprClass:
1582 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1583 case Expr::DeclRefExprClass:
1584 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1585 case Expr::ConstantExprClass: {
1586 const ConstantExpr *CE = cast<ConstantExpr>(E);
1587 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1589 return MakeNaturalAlignAddrLValue(Result, RetType);
1590 }
1591 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1592 }
1593 case Expr::ParenExprClass:
1594 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1595 case Expr::GenericSelectionExprClass:
1596 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1597 IsKnownNonNull);
1598 case Expr::PredefinedExprClass:
1599 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1600 case Expr::StringLiteralClass:
1601 return EmitStringLiteralLValue(cast<StringLiteral>(E));
1602 case Expr::ObjCEncodeExprClass:
1603 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1604 case Expr::PseudoObjectExprClass:
1605 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1606 case Expr::InitListExprClass:
1607 return EmitInitListLValue(cast<InitListExpr>(E));
1608 case Expr::CXXTemporaryObjectExprClass:
1609 case Expr::CXXConstructExprClass:
1610 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1611 case Expr::CXXBindTemporaryExprClass:
1612 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1613 case Expr::CXXUuidofExprClass:
1614 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1615 case Expr::LambdaExprClass:
1616 return EmitAggExprToLValue(E);
1617
1618 case Expr::ExprWithCleanupsClass: {
1619 const auto *cleanups = cast<ExprWithCleanups>(E);
1620 RunCleanupsScope Scope(*this);
1621 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1622 if (LV.isSimple()) {
1623 // Defend against branches out of gnu statement expressions surrounded by
1624 // cleanups.
1625 Address Addr = LV.getAddress();
1626 llvm::Value *V = Addr.getBasePointer();
1627 Scope.ForceCleanup({&V});
1628 Addr.replaceBasePointer(V);
1629 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1630 LV.getBaseInfo(), LV.getTBAAInfo());
1631 }
1632 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1633 // bitfield lvalue or some other non-simple lvalue?
1634 return LV;
1635 }
1636
1637 case Expr::CXXDefaultArgExprClass: {
1638 auto *DAE = cast<CXXDefaultArgExpr>(E);
1639 CXXDefaultArgExprScope Scope(*this, DAE);
1640 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1641 }
1642 case Expr::CXXDefaultInitExprClass: {
1643 auto *DIE = cast<CXXDefaultInitExpr>(E);
1644 CXXDefaultInitExprScope Scope(*this, DIE);
1645 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1646 }
1647 case Expr::CXXTypeidExprClass:
1648 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1649
1650 case Expr::ObjCMessageExprClass:
1651 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1652 case Expr::ObjCIvarRefExprClass:
1653 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1654 case Expr::StmtExprClass:
1655 return EmitStmtExprLValue(cast<StmtExpr>(E));
1656 case Expr::UnaryOperatorClass:
1657 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1658 case Expr::ArraySubscriptExprClass:
1659 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1660 case Expr::MatrixSubscriptExprClass:
1661 return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1662 case Expr::ArraySectionExprClass:
1663 return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1664 case Expr::ExtVectorElementExprClass:
1665 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1666 case Expr::CXXThisExprClass:
1667 return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
1668 case Expr::MemberExprClass:
1669 return EmitMemberExpr(cast<MemberExpr>(E));
1670 case Expr::CompoundLiteralExprClass:
1671 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1672 case Expr::ConditionalOperatorClass:
1673 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1674 case Expr::BinaryConditionalOperatorClass:
1675 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1676 case Expr::ChooseExprClass:
1677 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1678 case Expr::OpaqueValueExprClass:
1679 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1680 case Expr::SubstNonTypeTemplateParmExprClass:
1681 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1682 IsKnownNonNull);
1683 case Expr::ImplicitCastExprClass:
1684 case Expr::CStyleCastExprClass:
1685 case Expr::CXXFunctionalCastExprClass:
1686 case Expr::CXXStaticCastExprClass:
1687 case Expr::CXXDynamicCastExprClass:
1688 case Expr::CXXReinterpretCastExprClass:
1689 case Expr::CXXConstCastExprClass:
1690 case Expr::CXXAddrspaceCastExprClass:
1691 case Expr::ObjCBridgedCastExprClass:
1692 return EmitCastLValue(cast<CastExpr>(E));
1693
1694 case Expr::MaterializeTemporaryExprClass:
1695 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1696
1697 case Expr::CoawaitExprClass:
1698 return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1699 case Expr::CoyieldExprClass:
1700 return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1701 case Expr::PackIndexingExprClass:
1702 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1703 case Expr::HLSLOutArgExprClass:
1704 llvm_unreachable("cannot emit a HLSL out argument directly");
1705 }
1706}
1707
1708/// Given an object of the given canonical type, can we safely copy a
1709/// value out of it based on its initializer?
1710static bool isConstantEmittableObjectType(QualType type) {
1711 assert(type.isCanonical());
1712 assert(!type->isReferenceType());
1713
1714 // Must be const-qualified but non-volatile.
1715 Qualifiers qs = type.getLocalQualifiers();
1716 if (!qs.hasConst() || qs.hasVolatile()) return false;
1717
1718 // Otherwise, all object types satisfy this except C++ classes with
1719 // mutable subobjects or non-trivial copy/destroy behavior.
1720 if (const auto *RT = dyn_cast<RecordType>(type))
1721 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1722 if (RD->hasMutableFields() || !RD->isTrivial())
1723 return false;
1724
1725 return true;
1726}
1727
1728/// Can we constant-emit a load of a reference to a variable of the
1729/// given type? This is different from predicates like
1730/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1731/// in situations that don't necessarily satisfy the language's rules
1732/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1733/// to do this with const float variables even if those variables
1734/// aren't marked 'constexpr'.
1735enum ConstantEmissionKind {
1736 CEK_None,
1737 CEK_AsReferenceOnly,
1738 CEK_AsValueOrReference,
1739 CEK_AsValueOnly
1740};
1741static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1742 type = type.getCanonicalType();
1743 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1744 if (isConstantEmittableObjectType(ref->getPointeeType()))
1745 return CEK_AsValueOrReference;
1746 return CEK_AsReferenceOnly;
1747 }
1748 if (isConstantEmittableObjectType(type))
1749 return CEK_AsValueOnly;
1750 return CEK_None;
1751}
1751}
1752
1753/// Try to emit a reference to the given value without producing it as
1754/// an l-value. This is just an optimization, but it avoids us needing
1755/// to emit global copies of variables if they're named without triggering
1756/// a formal use in a context where we can't emit a direct reference to them,
1757/// for instance if a block or lambda or a member of a local class uses a
1758/// const int variable or constexpr variable from an enclosing function.
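///
/// For illustration, the kind of case this targets (an assumed example):
/// \code
///   void f() {
///     const int n = 4;
///     auto g = [] { return n; }; // 'n' is named without an odr-use and
///   }                            // can be folded to the constant 4 here.
/// \endcode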
1759CodeGenFunction::ConstantEmission
1760CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
1761 ValueDecl *value = refExpr->getDecl();
1762
1763 // The value needs to be an enum constant or a constant variable.
1764 ConstantEmissionKind CEK;
1765 if (isa<ParmVarDecl>(value)) {
1766 CEK = CEK_None;
1767 } else if (auto *var = dyn_cast<VarDecl>(value)) {
1768 CEK = checkVarTypeForConstantEmission(var->getType());
1769 } else if (isa<EnumConstantDecl>(value)) {
1770 CEK = CEK_AsValueOnly;
1771 } else {
1772 CEK = CEK_None;
1773 }
1774 if (CEK == CEK_None) return ConstantEmission();
1775
1776 Expr::EvalResult result;
1777 bool resultIsReference;
1778 QualType resultType;
1779
1780 // It's best to evaluate all the way as an r-value if that's permitted.
1781 if (CEK != CEK_AsReferenceOnly &&
1782 refExpr->EvaluateAsRValue(result, getContext())) {
1783 resultIsReference = false;
1784 resultType = refExpr->getType();
1785
1786 // Otherwise, try to evaluate as an l-value.
1787 } else if (CEK != CEK_AsValueOnly &&
1788 refExpr->EvaluateAsLValue(result, getContext())) {
1789 resultIsReference = true;
1790 resultType = value->getType();
1791
1792 // Failure.
1793 } else {
1794 return ConstantEmission();
1795 }
1796
1797 // In any case, if the initializer has side-effects, abandon ship.
1798 if (result.HasSideEffects)
1799 return ConstantEmission();
1800
1801 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1802 // referencing a global host variable by copy. In this case the lambda should
1803 // make a copy of the value of the global host variable. The DRE of the
1804 // captured reference variable cannot be emitted as load from the host
1805 // global variable as compile time constant, since the host variable is not
1806 // accessible on device. The DRE of the captured reference variable has to be
1807 // loaded from captures.
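 // For illustration, the pattern in question (an assumed example):
 //   int HostVar;                                   // host-only global
 //   void launch() {
 //     int &Ref = HostVar;
 //     auto K = [=] __device__ () { return Ref; };  // 'Ref' must be loaded
 //   }                                              // from the capture, not
 //                                                  // folded to HostVar.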
1808 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1809 refExpr->refersToEnclosingVariableOrCapture()) {
1810 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1811 if (MD && MD->getParent()->isLambda() &&
1812 MD->getOverloadedOperator() == OO_Call) {
1813 const APValue::LValueBase &base = result.Val.getLValueBase();
1814 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1815 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1816 if (!VD->hasAttr<CUDADeviceAttr>()) {
1817 return ConstantEmission();
1818 }
1819 }
1820 }
1821 }
1822 }
1823
1824 // Emit as a constant.
1825 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1826 result.Val, resultType);
1827
1828 // Make sure we emit a debug reference to the global variable.
1829 // This should probably fire even for
1830 if (isa<VarDecl>(value)) {
1831 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1832 EmitDeclRefExprDbgValue(refExpr, result.Val);
1833 } else {
1834 assert(isa<EnumConstantDecl>(value));
1835 EmitDeclRefExprDbgValue(refExpr, result.Val);
1836 }
1837
1838 // If we emitted a reference constant, we need to dereference that.
1839 if (resultIsReference)
1840 return ConstantEmission::forReference(C);
1841
1842 return ConstantEmission::forValue(C);
1843}
1844
1845static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1846 const MemberExpr *ME) {
1847 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1848 // Try to emit static variable member expressions as DREs.
1849 return DeclRefExpr::Create(
1850 CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1851 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1852 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1853 }
1854 return nullptr;
1855}
1856
1857CodeGenFunction::ConstantEmission
1858CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1859 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1860 return tryEmitAsConstant(DRE);
1861 return ConstantEmission();
1862}
1863
1864llvm::Value *CodeGenFunction::emitScalarConstant(
1865 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1866 assert(Constant && "not a constant");
1867 if (Constant.isReference())
1868 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1869 E->getExprLoc())
1870 .getScalarVal();
1871 return Constant.getValue();
1872}
1873
1874llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1875 SourceLocation Loc) {
1876 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1877 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1878 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1879}
1880
1882 if (Ty->isBooleanType())
1883 return true;
1884
1885 if (const EnumType *ET = Ty->getAs<EnumType>())
1886 return ET->getDecl()->getIntegerType()->isBooleanType();
1887
1888 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1889 return hasBooleanRepresentation(AT->getValueType());
1890
1891 return false;
1892}
1893
1894static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1895 llvm::APInt &Min, llvm::APInt &End,
1896 bool StrictEnums, bool IsBool) {
1897 const EnumType *ET = Ty->getAs<EnumType>();
1898 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1899 ET && !ET->getDecl()->isFixed();
1900 if (!IsBool && !IsRegularCPlusPlusEnum)
1901 return false;
1902
1903 if (IsBool) {
1904 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1905 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1906 } else {
1907 const EnumDecl *ED = ET->getDecl();
1908 ED->getValueRange(End, Min);
1909 }
1910 return true;
1911}
1912
1913llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1914 llvm::APInt Min, End;
1915 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1916 hasBooleanRepresentation(Ty)))
1917 return nullptr;
1918
1919 llvm::MDBuilder MDHelper(getLLVMContext());
1920 return MDHelper.createRange(Min, End);
1921}
1922
1923bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1924 SourceLocation Loc) {
1925 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1926 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1927 if (!HasBoolCheck && !HasEnumCheck)
1928 return false;
1929
1930 bool IsBool = hasBooleanRepresentation(Ty) ||
1931 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1932 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1933 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1934 if (!NeedsBoolCheck && !NeedsEnumCheck)
1935 return false;
1936
1937 // Single-bit booleans don't need to be checked. Special-case this to avoid
1938 // a bit width mismatch when handling bitfield values. This is handled by
1939 // EmitFromMemory for the non-bitfield case.
1940 if (IsBool &&
1941 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1942 return false;
1943
1944 if (NeedsEnumCheck &&
1945 getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
1946 return false;
1947
1948 llvm::APInt Min, End;
1949 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1950 return true;
1951
1952 auto &Ctx = getLLVMContext();
1953 SanitizerScope SanScope(this);
1954 llvm::Value *Check;
1955 --End;
1956 if (!Min) {
1957 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1958 } else {
1959 llvm::Value *Upper =
1960 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1961 llvm::Value *Lower =
1962 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1963 Check = Builder.CreateAnd(Upper, Lower);
1964 }
1965 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1966 EmitCheckTypeDescriptor(Ty)};
1967 SanitizerKind::SanitizerOrdinal Kind =
1968 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
1969 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1970 StaticArgs, EmitCheckValue(Value));
1971 return true;
1972}
1973
1974llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1975 QualType Ty,
1976 SourceLocation Loc,
1977 LValueBaseInfo BaseInfo,
1978 TBAAAccessInfo TBAAInfo,
1979 bool isNontemporal) {
1980 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1981 if (GV->isThreadLocal())
1982 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1983 NotKnownNonNull);
1984
1985 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1986 // Boolean vectors use `iN` as storage type.
1987 if (ClangVecTy->isExtVectorBoolType()) {
1988 llvm::Type *ValTy = ConvertType(Ty);
1989 unsigned ValNumElems =
1990 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1991 // Load the `iP` storage object (P is the padded vector size).
1992 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1993 const auto *RawIntTy = RawIntV->getType();
1994 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1995 // Bitcast iP --> <P x i1>.
1996 auto *PaddedVecTy = llvm::FixedVectorType::get(
1997 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1998 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1999 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2000 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2001
2002 return EmitFromMemory(V, Ty);
2003 }
2004
2005 // Handle vectors of size 3 like size 4 for better performance.
2006 const llvm::Type *EltTy = Addr.getElementType();
2007 const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
2008
2009 if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
2010
2011 llvm::VectorType *vec4Ty =
2012 llvm::FixedVectorType::get(VTy->getElementType(), 4);
2013 Address Cast = Addr.withElementType(vec4Ty);
2014 // Now load value.
2015 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
2016
2017 // Shuffle vector to get vec3.
2018 V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
2019 return EmitFromMemory(V, Ty);
2020 }
2021 }
2022
2023 // Atomic operations have to be done on integral types.
2024 LValue AtomicLValue =
2025 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2026 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2027 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2028 }
2029
2030 Addr =
2031 Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
2032
2033 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2034 if (isNontemporal) {
2035 llvm::MDNode *Node = llvm::MDNode::get(
2036 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2037 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2038 }
2039
2040 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2041
2042 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2043 // In order to prevent the optimizer from throwing away the check, don't
2044 // attach range metadata to the load.
2045 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2046 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2047 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2048 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2049 llvm::MDNode::get(getLLVMContext(), {}));
2050 }
2051
2052 return EmitFromMemory(Load, Ty);
2053}
2054
2055/// Converts a scalar value from its primary IR type (as returned
2056/// by ConvertType) to its load/store type (as returned by
2057/// convertTypeForLoadStore).
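///
/// For example, a C/C++ 'bool' has primary IR type 'i1' but is stored in
/// memory as 'i8', so the widening cast is emitted here (illustrative IR):
/// \code
///   %storedv = zext i1 %value to i8
/// \endcode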
2058llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2059 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2060 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2061 bool Signed = Ty->isSignedIntegerOrEnumerationType();
2062 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2063 }
2064
2065 if (Ty->isExtVectorBoolType()) {
2066 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2067 // Expand to the memory bit width.
2068 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2069 // <N x i1> --> <P x i1>.
2070 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2071 // <P x i1> --> iP.
2072 Value = Builder.CreateBitCast(Value, StoreTy);
2073 }
2074
2075 return Value;
2076}
2077
2078/// Converts a scalar value from its load/store type (as returned
2079/// by convertTypeForLoadStore) to its primary IR type (as returned
2080/// by ConvertType).
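///
/// This is the inverse of EmitToMemory: for example, a 'bool' loaded as 'i8'
/// is truncated back to its primary 'i1' type here (illustrative IR):
/// \code
///   %loadedv = trunc i8 %load to i1
/// \endcode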
2081llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2082 if (Ty->isExtVectorBoolType()) {
2083 const auto *RawIntTy = Value->getType();
2084 // Bitcast iP --> <P x i1>.
2085 auto *PaddedVecTy = llvm::FixedVectorType::get(
2086 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2087 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2088 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2089 llvm::Type *ValTy = ConvertType(Ty);
2090 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2091 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2092 }
2093
2094 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2095 llvm::Type *ResTy = ConvertType(Ty);
2096 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2097 }
2098
2099 return Value;
2100}
2101
2102// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2103// MatrixType), if it points to an array (the memory type of MatrixType).
2104static Address MaybeConvertMatrixAddress(Address Addr,
2105 CodeGenFunction &CGF,
2106 bool IsVector = true) {
2107 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2108 if (ArrayTy && IsVector) {
2109 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2110 ArrayTy->getNumElements());
2111
2112 return Addr.withElementType(VectorTy);
2113 }
2114 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2115 if (VectorTy && !IsVector) {
2116 auto *ArrayTy = llvm::ArrayType::get(
2117 VectorTy->getElementType(),
2118 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2119
2120 return Addr.withElementType(ArrayTy);
2121 }
2122
2123 return Addr;
2124}
2125
2126// Emit a store of a matrix LValue. This may require casting the original
2127// pointer to memory address (ArrayType) to a pointer to the value type
2128// (VectorType).
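// For example (an assumed case): a 2x2 'float' matrix value has IR value type
// '<4 x float>' but memory type '[4 x float]', so the destination address is
// recast to the vector type before the store below.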
2129static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2130 bool isInit, CodeGenFunction &CGF) {
2131 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2132 value->getType()->isVectorTy());
2133 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2134 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2135 lvalue.isNontemporal());
2136}
2137
2138void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2139 bool Volatile, QualType Ty,
2140 LValueBaseInfo BaseInfo,
2141 TBAAAccessInfo TBAAInfo,
2142 bool isInit, bool isNontemporal) {
2143 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2144 if (GV->isThreadLocal())
2145 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2146 NotKnownNonNull);
2147
2148 llvm::Type *SrcTy = Value->getType();
2149 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2150 auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
2151 if (!CGM.getCodeGenOpts().PreserveVec3Type) {
2152 // Handle vec3 special.
2153 if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
2154 cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2155 // Our source is a vec3, do a shuffle vector to make it a vec4.
2156 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2157 "extractVec");
2158 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2159 }
2160 if (Addr.getElementType() != SrcTy) {
2161 Addr = Addr.withElementType(SrcTy);
2162 }
2163 }
2164 }
2165
2166 Value = EmitToMemory(Value, Ty);
2167
2168 LValue AtomicLValue =
2169 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2170 if (Ty->isAtomicType() ||
2171 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2172 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2173 return;
2174 }
2175
2176 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2177 if (isNontemporal) {
2178 llvm::MDNode *Node =
2179 llvm::MDNode::get(Store->getContext(),
2180 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2181 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2182 }
2183
2184 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2185}
2186
2187void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2188 bool isInit) {
2189 if (lvalue.getType()->isConstantMatrixType()) {
2190 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2191 return;
2192 }
2193
2194 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2195 lvalue.getType(), lvalue.getBaseInfo(),
2196 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2197}
2198
2199// Emit a load of a LValue of matrix type. This may require casting the pointer
2200// to memory address (ArrayType) to a pointer to the value type (VectorType).
2201static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
2202 CodeGenFunction &CGF) {
2203 assert(LV.getType()->isConstantMatrixType());
2204 Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
2205 LV.setAddress(Addr);
2206 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2207}
2208
2209RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
2210 SourceLocation Loc) {
2211 QualType Ty = LV.getType();
2212 switch (getEvaluationKind(Ty)) {
2213 case TEK_Scalar:
2214 return EmitLoadOfLValue(LV, Loc);
2215 case TEK_Complex:
2216 return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2217 case TEK_Aggregate:
2218 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2219 return Slot.asRValue();
2220 }
2221 llvm_unreachable("bad evaluation kind");
2222}
2223
2224/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2225/// method emits the address of the lvalue, then loads the result as an rvalue,
2226/// returning the rvalue.
2227RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
2228 if (LV.isObjCWeak()) {
2229 // load of a __weak object.
2230 Address AddrWeakObj = LV.getAddress();
2231 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2232 AddrWeakObj));
2233 }
2234 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
2235 // In MRC mode, we do a load+autorelease.
2236 if (!getLangOpts().ObjCAutoRefCount) {
2237 return RValue::get(EmitARCLoadWeak(LV.getAddress()));
2238 }
2239
2240 // In ARC mode, we load retained and then consume the value.
2241 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2242 Object = EmitObjCConsumeObject(LV.getType(), Object);
2243 return RValue::get(Object);
2244 }
2245
2246 if (LV.isSimple()) {
2247 assert(!LV.getType()->isFunctionType());
2248
2249 if (LV.getType()->isConstantMatrixType())
2250 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2251
2252 // Everything needs a load.
2253 return RValue::get(EmitLoadOfScalar(LV, Loc));
2254 }
2255
2256 if (LV.isVectorElt()) {
2257 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2258 LV.isVolatileQualified());
2259 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2260 "vecext"));
2261 }
2262
2263 // If this is a reference to a subset of the elements of a vector, either
2264 // shuffle the input or extract/insert them as appropriate.
2265 if (LV.isExtVectorElt()) {
2266 return EmitLoadOfExtVectorElementLValue(LV);
2267 }
2268
2269 // Global Register variables always invoke intrinsics
2270 if (LV.isGlobalReg())
2271 return EmitLoadOfGlobalRegLValue(LV);
2272
2273 if (LV.isMatrixElt()) {
2274 llvm::Value *Idx = LV.getMatrixIdx();
2275 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2276 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2277 llvm::MatrixBuilder MB(Builder);
2278 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2279 }
2280 llvm::LoadInst *Load =
2281 Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2282 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2283 }
2284
2285 assert(LV.isBitField() && "Unknown LValue type!");
2286 return EmitLoadOfBitfieldLValue(LV, Loc);
2287}
2288
2289RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
2290 SourceLocation Loc) {
2291 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2292
2293 // Get the output type.
2294 llvm::Type *ResLTy = ConvertType(LV.getType());
2295
2296 Address Ptr = LV.getBitFieldAddress();
2297 llvm::Value *Val =
2298 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2299
2300 bool UseVolatile = LV.isVolatileQualified() &&
2301 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2302 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2303 const unsigned StorageSize =
2304 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2305 if (Info.IsSigned) {
2306 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2307 unsigned HighBits = StorageSize - Offset - Info.Size;
2308 if (HighBits)
2309 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2310 if (Offset + HighBits)
2311 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2312 } else {
2313 if (Offset)
2314 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2315 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2316 Val = Builder.CreateAnd(
2317 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2318 }
2319 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2320 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2321 return RValue::get(Val);
2322}
2323
2324// If this is a reference to a subset of the elements of a vector, create an
2325// appropriate shufflevector.
2326RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
2327 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2328 LV.isVolatileQualified());
2329
2330 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2331 // IR value to a vector here allows the rest of codegen to behave as normal.
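  // For instance (assumed HLSL snippet): given 'float f;', an access such as
  // 'f.x' treats 'f' as a <1 x float>, so the loaded scalar is first widened
  // to a one-element vector before the component shuffle below.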
2332 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2333 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2334 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2335 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2336 }
2337
2338 const llvm::Constant *Elts = LV.getExtVectorElts();
2339
2340 // If the result of the expression is a non-vector type, we must be extracting
2341 // a single element. Just codegen as an extractelement.
2342 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2343 if (!ExprVT) {
2344 unsigned InIdx = getAccessedFieldNo(0, Elts);
2345 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2346 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2347 }
2348
2349 // Always use shuffle vector to try to retain the original program structure
2350 unsigned NumResultElts = ExprVT->getNumElements();
2351
2352 SmallVector<int, 4> Mask;
2353 for (unsigned i = 0; i != NumResultElts; ++i)
2354 Mask.push_back(getAccessedFieldNo(i, Elts));
2355
2356 Vec = Builder.CreateShuffleVector(Vec, Mask);
2357 return RValue::get(Vec);
2358}
2359
2360/// Generates lvalue for partial ext_vector access.
2361Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2362 Address VectorAddress = LV.getExtVectorAddress();
2363 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2364 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2365
2366 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2367
2368 const llvm::Constant *Elts = LV.getExtVectorElts();
2369 unsigned ix = getAccessedFieldNo(0, Elts);
2370
2371 Address VectorBasePtrPlusIx =
2372 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2373 "vector.elt");
2374
2375 return VectorBasePtrPlusIx;
2376}
2377
2378/// Loads of global named registers are always calls to intrinsics.
2379RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2380 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2381 "Bad type for register variable");
2382 llvm::MDNode *RegName = cast<llvm::MDNode>(
2383 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2384
2385 // We accept integer and pointer types only
2386 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2387 llvm::Type *Ty = OrigTy;
2388 if (OrigTy->isPointerTy())
2389 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2390 llvm::Type *Types[] = { Ty };
2391
2392 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2393 llvm::Value *Call = Builder.CreateCall(
2394 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2395 if (OrigTy->isPointerTy())
2396 Call = Builder.CreateIntToPtr(Call, OrigTy);
2397 return RValue::get(Call);
2398}
2399
2400/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2401/// lvalue, where both are guaranteed to have the same type, and that type
2402/// is 'Ty'.
2403void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2404 bool isInit) {
2405 if (!Dst.isSimple()) {
2406 if (Dst.isVectorElt()) {
2407 // Read/modify/write the vector, inserting the new element.
2408 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2409 Dst.isVolatileQualified());
2410 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2411 if (IRStoreTy) {
2412 auto *IRVecTy = llvm::FixedVectorType::get(
2413 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2414 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2415 // iN --> <N x i1>.
2416 }
2417 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2418 Dst.getVectorIdx(), "vecins");
2419 if (IRStoreTy) {
2420 // <N x i1> --> <iN>.
2421 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2422 }
2423 Builder.CreateStore(Vec, Dst.getVectorAddress(),
2424 Dst.isVolatileQualified());
2425 return;
2426 }
2427
2428 // If this is an update of extended vector elements, insert them as
2429 // appropriate.
2430 if (Dst.isExtVectorElt())
2431 return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2432
2433 if (Dst.isGlobalReg())
2434 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2435
2436 if (Dst.isMatrixElt()) {
2437 llvm::Value *Idx = Dst.getMatrixIdx();
2438 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2439 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2440 llvm::MatrixBuilder MB(Builder);
2441 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2442 }
2443 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2444 llvm::Value *Vec =
2445 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2446 Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2447 Dst.isVolatileQualified());
2448 return;
2449 }
2450
2451 assert(Dst.isBitField() && "Unknown LValue type");
2452 return EmitStoreThroughBitfieldLValue(Src, Dst);
2453 }
2454
2455 // There's special magic for assigning into an ARC-qualified l-value.
2456 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2457 switch (Lifetime) {
2458 case Qualifiers::OCL_None:
2459 llvm_unreachable("present but none");
2460
2461 case Qualifiers::OCL_ExplicitNone:
2462 // nothing special
2463 break;
2464
2465 case Qualifiers::OCL_Strong:
2466 if (isInit) {
2467 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2468 break;
2469 }
2470 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2471 return;
2472
2473 case Qualifiers::OCL_Weak:
2474 if (isInit)
2475 // Initialize and then skip the primitive store.
2476 EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
2477 else
2478 EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
2479 /*ignore*/ true);
2480 return;
2481
2482 case Qualifiers::OCL_Autoreleasing:
2483 Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2484 Src.getScalarVal()));
2485 // fall into the normal path
2486 break;
2487 }
2488 }
2489
2490 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2491 // load of a __weak object.
2492 Address LvalueDst = Dst.getAddress();
2493 llvm::Value *src = Src.getScalarVal();
2494 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2495 return;
2496 }
2497
2498 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2499 // load of a __strong object.
2500 Address LvalueDst = Dst.getAddress();
2501 llvm::Value *src = Src.getScalarVal();
2502 if (Dst.isObjCIvar()) {
2503 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2504 llvm::Type *ResultType = IntPtrTy;
2505 Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2506 llvm::Value *RHS = dst.emitRawPointer(*this);
2507 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2508 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2509 ResultType, "sub.ptr.lhs.cast");
2510 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2511 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2512 } else if (Dst.isGlobalObjCRef()) {
2513 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2514 Dst.isThreadLocalRef());
2515 }
2516 else
2517 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2518 return;
2519 }
2520
2521 assert(Src.isScalar() && "Can't emit an agg store with this method");
2522 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2523}
2524
2525void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2526 llvm::Value **Result) {
2527 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2528 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2529 Address Ptr = Dst.getBitFieldAddress();
2530
2531 // Get the source value, truncated to the width of the bit-field.
2532 llvm::Value *SrcVal = Src.getScalarVal();
2533
2534 // Cast the source to the storage type and shift it into place.
2535 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2536 /*isSigned=*/false);
2537 llvm::Value *MaskedVal = SrcVal;
2538
2539 const bool UseVolatile =
2540 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2541 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2542 const unsigned StorageSize =
2543 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2544 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2545 // See if there are other bits in the bitfield's storage we'll need to load
2546 // and mask together with source before storing.
2547 if (StorageSize != Info.Size) {
2548 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2549 llvm::Value *Val =
2550 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2551
2552 // Mask the source value as needed.
2553 if (!hasBooleanRepresentation(Dst.getType()))
2554 SrcVal = Builder.CreateAnd(
2555 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2556 "bf.value");
2557 MaskedVal = SrcVal;
2558 if (Offset)
2559 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2560
2561 // Mask out the original value.
2562 Val = Builder.CreateAnd(
2563 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2564 "bf.clear");
2565
2566 // Or together the unchanged values and the source value.
2567 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2568 } else {
2569 assert(Offset == 0);
2570 // According to the AAPCS:
2571 // When a volatile bit-field is written, and its container does not overlap
2572 // with any non-bit-field member, its container must be read exactly once
2573 // and written exactly once using the access width appropriate to the type
2574 // of the container. The two accesses are not atomic.
2575 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2576 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2577 Builder.CreateLoad(Ptr, true, "bf.load");
2578 }
2579
2580 // Write the new value back out.
2581 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2582
2583 // Return the new value of the bit-field, if requested.
2584 if (Result) {
2585 llvm::Value *ResultVal = MaskedVal;
2586
2587 // Sign extend the value if needed.
2588 if (Info.IsSigned) {
2589 assert(Info.Size <= StorageSize);
2590 unsigned HighBits = StorageSize - Info.Size;
2591 if (HighBits) {
2592 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2593 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2594 }
2595 }
2596
2597 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2598 "bf.result.cast");
2599 *Result = EmitFromMemory(ResultVal, Dst.getType());
2600 }
2601}
2602
2603void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2604 LValue Dst) {
2605 // HLSL allows storing to scalar values through ExtVector component LValues.
2606 // To support this we need to handle the case where the destination address is
2607 // a scalar.
2608 Address DstAddr = Dst.getExtVectorAddress();
2609 if (!DstAddr.getElementType()->isVectorTy()) {
2610 assert(!Dst.getType()->isVectorType() &&
2611 "this should only occur for non-vector l-values");
2612 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2613 return;
2614 }
2615
2616 // This access turns into a read/modify/write of the vector. Load the input
2617 // value now.
2618 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2619 const llvm::Constant *Elts = Dst.getExtVectorElts();
2620
2621 llvm::Value *SrcVal = Src.getScalarVal();
2622
2623 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2624 unsigned NumSrcElts = VTy->getNumElements();
2625 unsigned NumDstElts =
2626 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2627 if (NumDstElts == NumSrcElts) {
2628 // Use a shuffle vector if the src and destination have the same number of
2629 // elements, and restore the vector mask since it is indexed from the side
2630 // it will be stored to.
2631 SmallVector<int, 4> Mask(NumDstElts);
2632 for (unsigned i = 0; i != NumSrcElts; ++i)
2633 Mask[getAccessedFieldNo(i, Elts)] = i;
2634
2635 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2636 } else if (NumDstElts > NumSrcElts) {
2637 // Extend the source vector to the same length and then shuffle it
2638 // into the destination.
2639 // FIXME: since we're shuffling with undef, can we just use the indices
2640 // into that? This could be simpler.
2641 SmallVector<int, 4> ExtMask;
2642 for (unsigned i = 0; i != NumSrcElts; ++i)
2643 ExtMask.push_back(i);
2644 ExtMask.resize(NumDstElts, -1);
2645 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2646 // Build the identity mask.
2647 SmallVector<int, 4> Mask;
2648 for (unsigned i = 0; i != NumDstElts; ++i)
2649 Mask.push_back(i);
2650
2651 // When the vector size is odd and .odd or .hi is used, the last element
2652 // of the Elts constant array will be one past the size of the vector.
2653 // Ignore the last element here, if it is greater than the mask size.
2654 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2655 NumSrcElts--;
2656
2657 // Then overwrite the entries that get shuffled in from the source.
2658 for (unsigned i = 0; i != NumSrcElts; ++i)
2659 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2660 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2661 } else {
2662 // We should never shorten the vector
2663 llvm_unreachable("unexpected shorten vector length");
2664 }
2665 } else {
2666 // If the Src is a scalar (not a vector) and the target is a vector, it must
2667 // be updating one element.
2668 unsigned InIdx = getAccessedFieldNo(0, Elts);
2669 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2670 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2671 }
2672
2673 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2674 Dst.isVolatileQualified());
2675}
2676
2677/// Stores of global named registers are always calls to intrinsics.
2678void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2679 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2680 "Bad type for register variable");
2681 llvm::MDNode *RegName = cast<llvm::MDNode>(
2682 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2683 assert(RegName && "Register LValue is not metadata");
2684
2685 // We accept integer and pointer types only
2686 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2687 llvm::Type *Ty = OrigTy;
2688 if (OrigTy->isPointerTy())
2689 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2690 llvm::Type *Types[] = { Ty };
2691
2692 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2693 llvm::Value *Value = Src.getScalarVal();
2694 if (OrigTy->isPointerTy())
2695 Value = Builder.CreatePtrToInt(Value, Ty);
2696 Builder.CreateCall(
2697 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2698}
2699
2700// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2701// generating write-barrier API. It is currently a global, ivar,
2702// or neither.
2703static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2704 LValue &LV,
2705 bool IsMemberAccess=false) {
2706 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2707 return;
2708
2709 if (isa<ObjCIvarRefExpr>(E)) {
2710 QualType ExpTy = E->getType();
2711 if (IsMemberAccess && ExpTy->isPointerType()) {
2712 // If the ivar is a structure pointer, assigning to a field of
2713 // this struct follows gcc's behavior and makes it a non-ivar
2714 // write-barrier conservatively.
2715 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2716 if (ExpTy->isRecordType()) {
2717 LV.setObjCIvar(false);
2718 return;
2719 }
2720 }
2721 LV.setObjCIvar(true);
2722 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2723 LV.setBaseIvarExp(Exp->getBase());
2724 LV.setObjCArray(E->getType()->isArrayType());
2725 return;
2726 }
2727
2728 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2729 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2730 if (VD->hasGlobalStorage()) {
2731 LV.setGlobalObjCRef(true);
2732 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2733 }
2734 }
2736 return;
2737 }
2738
2739 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2740 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2741 return;
2742 }
2743
2744 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2745 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2746 if (LV.isObjCIvar()) {
2747 // If cast is to a structure pointer, follow gcc's behavior and make it
2748 // a non-ivar write-barrier.
2749 QualType ExpTy = E->getType();
2750 if (ExpTy->isPointerType())
2751 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2752 if (ExpTy->isRecordType())
2753 LV.setObjCIvar(false);
2754 }
2755 return;
2756 }
2757
2758 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2759 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2760 return;
2761 }
2762
2763 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2764 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2765 return;
2766 }
2767
2768 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2769 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2770 return;
2771 }
2772
2773 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2774 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2775 return;
2776 }
2777
2778 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2779 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2780 if (LV.isObjCIvar() && !LV.isObjCArray())
2781 // Using array syntax to assign to what an ivar points to is not the
2782 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2783 LV.setObjCIvar(false);
2784 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2785 // Using array syntax to assign to what a global points to is not the
2786 // same as assigning to the global itself. {id *G;} G[i] = 0;
2787 LV.setGlobalObjCRef(false);
2788 return;
2789 }
2790
2791 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2792 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2793 // We don't know if member is an 'ivar', but this flag is looked at
2794 // only in the context of LV.isObjCIvar().
2796 return;
2797 }
2798}
2799
2800static LValue EmitThreadPrivateVarDeclLValue(
2801 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2802 llvm::Type *RealVarTy, SourceLocation Loc) {
2803 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2805 CGF, VD, Addr, Loc);
2806 else
2807 Addr =
2808 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2809
2810 Addr = Addr.withElementType(RealVarTy);
2811 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2812}
2813
2814static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2815 const VarDecl *VD, QualType T) {
2816 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2817 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2818 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2819 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2820 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2821 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2822 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2824 return Address::invalid();
2825 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2826 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2827 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2829 "Expected link clause OR to clause with unified memory enabled.");
2830 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2832 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2833}
2834
2835Address
2836CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2837 LValueBaseInfo *PointeeBaseInfo,
2838 TBAAAccessInfo *PointeeTBAAInfo) {
2839 llvm::LoadInst *Load =
2840 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2841 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2842 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2843 CharUnits(), /*ForPointeeType=*/true,
2844 PointeeBaseInfo, PointeeTBAAInfo);
2845}
2846
2847LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2848 LValueBaseInfo PointeeBaseInfo;
2849 TBAAAccessInfo PointeeTBAAInfo;
2850 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2851 &PointeeTBAAInfo);
2852 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2853 PointeeBaseInfo, PointeeTBAAInfo);
2854}
2855
2856Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2857 const PointerType *PtrTy,
2858 LValueBaseInfo *BaseInfo,
2859 TBAAAccessInfo *TBAAInfo) {
2860 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2861 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2862 CharUnits(), /*ForPointeeType=*/true,
2863 BaseInfo, TBAAInfo);
2864}
2865
2866LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2867 const PointerType *PtrTy) {
2868 LValueBaseInfo BaseInfo;
2869 TBAAAccessInfo TBAAInfo;
2870 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2871 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2872}
2873
2874static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2875 const Expr *E, const VarDecl *VD) {
2876 QualType T = E->getType();
2877
2878 // If it's thread_local, emit a call to its wrapper function instead.
2879 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2880 CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2881 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2882 // Check if the variable is marked as declare target with link clause in
2883 // device codegen.
2884 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2885 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2886 if (Addr.isValid())
2887 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2888 }
2889
2890 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2891
2892 if (VD->getTLSKind() != VarDecl::TLS_None)
2893 V = CGF.Builder.CreateThreadLocalAddress(V);
2894
2895 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2896 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2897 Address Addr(V, RealVarTy, Alignment);
2898 // Emit reference to the private copy of the variable if it is an OpenMP
2899 // threadprivate variable.
2900 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2901 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2902 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2903 E->getExprLoc());
2904 }
2905 LValue LV = VD->getType()->isReferenceType() ?
2906 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2907 AlignmentSource::Decl) :
2908 CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2909 setObjCGCLValueClass(CGF.getContext(), E, LV);
2910 return LV;
2911}
2912
2913llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD,
2914 llvm::Type *Ty) {
2915 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2916 if (FD->hasAttr<WeakRefAttr>()) {
2917 ConstantAddress aliasee = GetWeakRefReference(FD);
2918 return aliasee.getPointer();
2919 }
2920
2921 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2922 return V;
2923}
2924
2925static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
2926 GlobalDecl GD) {
2927 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2928 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2929 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2930 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2931 AlignmentSource::Decl);
2932}
2933
2934static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2935 llvm::Value *ThisValue) {
2936
2937 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2938}
2939
2940/// Named Registers are named metadata pointing to the register name
2941/// which will be read from/written to as an argument to the intrinsic
2942/// @llvm.read/write_register.
2943/// So far, only the name is being passed down, but other options such as
2944/// register type, allocation type or even optimization options could be
2945/// passed down via the metadata node.
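///
/// For illustration (an assumed example), a GNU global register variable such
/// as
/// \code
///   register unsigned long current_sp asm("sp");
/// \endcode
/// produces the named metadata !{!"sp"}, which is then passed to
/// @llvm.read_register / @llvm.write_register on each read or write.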
2946static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2947 SmallString<64> Name("llvm.named.register.");
2948 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2949 assert(Asm->getLabel().size() < 64-Name.size() &&
2950 "Register name too big");
2951 Name.append(Asm->getLabel());
2952 llvm::NamedMDNode *M =
2953 CGM.getModule().getOrInsertNamedMetadata(Name);
2954 if (M->getNumOperands() == 0) {
2955 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2956 Asm->getLabel());
2957 llvm::Metadata *Ops[] = {Str};
2958 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2959 }
2960
2961 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2962
2963 llvm::Value *Ptr =
2964 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2965 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2966}
2967
2968/// Determine whether we can emit a reference to \p VD from the current
2969/// context, despite not necessarily having seen an odr-use of the variable in
2970/// this context.
2971static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
2972 const DeclRefExpr *E,
2973 const VarDecl *VD) {
2974 // For a variable declared in an enclosing scope, do not emit a spurious
2975 // reference even if we have a capture, as that will emit an unwarranted
2976 // reference to our capture state, and will likely generate worse code than
2977 // emitting a local copy.
2978 if (E->refersToEnclosingVariableOrCapture())
2979 return false;
2980
2981 // For a local declaration declared in this function, we can always reference
2982 // it even if we don't have an odr-use.
2983 if (VD->hasLocalStorage()) {
2984 return VD->getDeclContext() ==
2985 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2986 }
2987
2988 // For a global declaration, we can emit a reference to it if we know
2989 // for sure that we are able to emit a definition of it.
2990 VD = VD->getDefinition(CGF.getContext());
2991 if (!VD)
2992 return false;
2993
2994 // Don't emit a spurious reference if it might be to a variable that only
2995 // exists on a different device / target.
2996 // FIXME: This is unnecessarily broad. Check whether this would actually be a
2997 // cross-target reference.
2998 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2999 CGF.getLangOpts().OpenCL) {
3000 return false;
3001 }
3002
3003 // We can emit a spurious reference only if the linkage implies that we'll
3004 // be emitting a non-interposable symbol that will be retained until link
3005 // time.
3006 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3007 case llvm::GlobalValue::ExternalLinkage:
3008 case llvm::GlobalValue::LinkOnceODRLinkage:
3009 case llvm::GlobalValue::WeakODRLinkage:
3010 case llvm::GlobalValue::InternalLinkage:
3011 case llvm::GlobalValue::PrivateLinkage:
3012 return true;
3013 default:
3014 return false;
3015 }
3016}
3017
3018LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
3019 const NamedDecl *ND = E->getDecl();
3020 QualType T = E->getType();
3021
3022 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3023 "should not emit an unevaluated operand");
3024
3025 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3026 // Global Named registers access via intrinsics only
3027 if (VD->getStorageClass() == SC_Register &&
3028 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3029 return EmitGlobalNamedRegister(VD, CGM);
3030
3031 // If this DeclRefExpr does not constitute an odr-use of the variable,
3032 // we're not permitted to emit a reference to it in general, and it might
3033 // not be captured if capture would be necessary for a use. Emit the
3034 // constant value directly instead.
3035 if (E->isNonOdrUse() == NOUR_Constant &&
3036 (VD->getType()->isReferenceType() ||
3037 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3038 VD->getAnyInitializer(VD);
3039 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3040 E->getLocation(), *VD->evaluateValue(), VD->getType());
3041 assert(Val && "failed to emit constant expression");
3042
3043 Address Addr = Address::invalid();
3044 if (!VD->getType()->isReferenceType()) {
3045 // Spill the constant value to a global.
3046 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3047 getContext().getDeclAlign(VD));
3048 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3049 auto *PTy = llvm::PointerType::get(
3050 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3051 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3052 } else {
3053 // Should we be using the alignment of the constant pointer we emitted?
3054 CharUnits Alignment =
3056 /* BaseInfo= */ nullptr,
3057 /* TBAAInfo= */ nullptr,
3058 /* forPointeeType= */ true);
3059 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3060 }
3061 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3062 }
3063
3064 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3065
3066 // Check for captured variables.
3067 if (E->refersToEnclosingVariableOrCapture()) {
3068 VD = VD->getCanonicalDecl();
3069 if (auto *FD = LambdaCaptureFields.lookup(VD))
3070 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3071 if (CapturedStmtInfo) {
3072 auto I = LocalDeclMap.find(VD);
3073 if (I != LocalDeclMap.end()) {
3074 LValue CapLVal;
3075 if (VD->getType()->isReferenceType())
3076 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3077 AlignmentSource::Decl);
3078 else
3079 CapLVal = MakeAddrLValue(I->second, T);
3080 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3081 // in simd context.
3082 if (getLangOpts().OpenMP &&
3084 CapLVal.setNontemporal(/*Value=*/true);
3085 return CapLVal;
3086 }
3087 LValue CapLVal =
3090 Address LValueAddress = CapLVal.getAddress();
3091 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3092 LValueAddress.getElementType(),
3093 getContext().getDeclAlign(VD)),
3094 CapLVal.getType(),
3096 CapLVal.getTBAAInfo());
3097 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3098 // in simd context.
3099 if (getLangOpts().OpenMP &&
3101 CapLVal.setNontemporal(/*Value=*/true);
3102 return CapLVal;
3103 }
3104
3105 assert(isa<BlockDecl>(CurCodeDecl));
3106 Address addr = GetAddrOfBlockDecl(VD);
3107 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3108 }
3109 }
3110
3111 // FIXME: We should be able to assert this for FunctionDecls as well!
3112 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3113 // those with a valid source location.
3114 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3115 !E->getLocation().isValid()) &&
3116 "Should not use decl without marking it used!");
3117
3118 if (ND->hasAttr<WeakRefAttr>()) {
3119 const auto *VD = cast<ValueDecl>(ND);
3120 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3121 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3122 }
3123
3124 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3125 // Check if this is a global variable.
3126 if (VD->hasLinkage() || VD->isStaticDataMember())
3127 return EmitGlobalVarDeclLValue(*this, E, VD);
3128
3129 Address addr = Address::invalid();
3130
3131 // The variable should generally be present in the local decl map.
3132 auto iter = LocalDeclMap.find(VD);
3133 if (iter != LocalDeclMap.end()) {
3134 addr = iter->second;
3135
3136 // Otherwise, it might be a static local we haven't emitted yet for
3137 // some reason; most likely, because it's in an outer function.
3138 } else if (VD->isStaticLocal()) {
3139 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3140 *VD, CGM.getLLVMLinkageVarDefinition(VD));
3141 addr = Address(
3142 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3143
3144 // No other cases for now.
3145 } else {
3146 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3147 }
3148
3149 // Handle threadlocal function locals.
3150 if (VD->getTLSKind() != VarDecl::TLS_None)
3151 addr = addr.withPointer(
3152 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3153 NotKnownNonNull);
3154
3155 // Check for OpenMP threadprivate variables.
3156 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3157 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3158 return EmitThreadPrivateVarDeclLValue(
3159 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3160 E->getExprLoc());
3161 }
3162
3163 // Drill into block byref variables.
3164 bool isBlockByref = VD->isEscapingByref();
3165 if (isBlockByref) {
3166 addr = emitBlockByrefAddress(addr, VD);
3167 }
3168
3169 // Drill into reference types.
3170 LValue LV = VD->getType()->isReferenceType() ?
3171 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3172 MakeAddrLValue(addr, T, AlignmentSource::Decl);
3173
3174 bool isLocalStorage = VD->hasLocalStorage();
3175
3176 bool NonGCable = isLocalStorage &&
3177 !VD->getType()->isReferenceType() &&
3178 !isBlockByref;
3179 if (NonGCable) {
3180 LV.getQuals().removeObjCGCAttr();
3181 LV.setNonGC(true);
3182 }
3183
3184 bool isImpreciseLifetime =
3185 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3186 if (isImpreciseLifetime)
3187 LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3188
3189 return LV;
3190 }
3191
3192 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3193 return EmitFunctionDeclLValue(*this, E, FD);
3194
3195 // FIXME: While we're emitting a binding from an enclosing scope, all other
3196 // DeclRefExprs we see should be implicitly treated as if they also refer to
3197 // an enclosing scope.
3198 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3199 if (E->refersToEnclosingVariableOrCapture()) {
3200 auto *FD = LambdaCaptureFields.lookup(BD);
3201 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3202 }
3203 return EmitLValue(BD->getBinding());
3204 }
3205
3206 // We can form DeclRefExprs naming GUID declarations when reconstituting
3207 // non-type template parameters into expressions.
3208 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3209 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3210 AlignmentSource::Decl);
3211
3212 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3213 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3214 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3215
3216 if (AS != T.getAddressSpace()) {
3217 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3218 auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3220 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3221 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3222 }
3223
3224 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3225 }
3226
3227 llvm_unreachable("Unhandled DeclRefExpr");
3228}
3229
3230LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3231 // __extension__ doesn't affect lvalue-ness.
3232 if (E->getOpcode() == UO_Extension)
3233 return EmitLValue(E->getSubExpr());
3234
3235 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3236 switch (E->getOpcode()) {
3237 default: llvm_unreachable("Unknown unary operator lvalue!");
3238 case UO_Deref: {
3239 QualType T = E->getSubExpr()->getType()->getPointeeType();
3240 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3241
3242 LValueBaseInfo BaseInfo;
3243 TBAAAccessInfo TBAAInfo;
3244 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3245 &TBAAInfo);
3246 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3248
3249 // We should not generate __weak write barrier on indirect reference
3250 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3251 // But, we continue to generate __strong write barrier on indirect write
3252 // into a pointer to object.
3253 if (getLangOpts().ObjC &&
3254 getLangOpts().getGC() != LangOptions::NonGC &&
3255 LV.isObjCWeak())
3257 return LV;
3258 }
3259 case UO_Real:
3260 case UO_Imag: {
3261 LValue LV = EmitLValue(E->getSubExpr());
3262 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3263
3264 // __real is valid on scalars. This is a faster way of testing that.
3265 // __imag can only produce an rvalue on scalars.
3266 if (E->getOpcode() == UO_Real &&
3267 !LV.getAddress().getElementType()->isStructTy()) {
3268 assert(E->getSubExpr()->getType()->isArithmeticType());
3269 return LV;
3270 }
3271
3272 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3273
3274 Address Component =
3275 (E->getOpcode() == UO_Real
3276 ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
3277 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3278 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3280 ElemLV.getQuals().addQualifiers(LV.getQuals());
3281 return ElemLV;
3282 }
3283 case UO_PreInc:
3284 case UO_PreDec: {
3285 LValue LV = EmitLValue(E->getSubExpr());
3286 bool isInc = E->getOpcode() == UO_PreInc;
3287
3288 if (E->getType()->isAnyComplexType())
3289 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3290 else
3291 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3292 return LV;
3293 }
3294 }
3295}
3296
3300}
3301
3305}
3306
3308 auto SL = E->getFunctionName();
3309 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3310 StringRef FnName = CurFn->getName();
3311 if (FnName.starts_with("\01"))
3312 FnName = FnName.substr(1);
3313 StringRef NameItems[] = {
3314 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3315 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3316 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3317 std::string Name = std::string(SL->getString());
3318 if (!Name.empty()) {
3319 unsigned Discriminator =
3321 if (Discriminator)
3322 Name += "_" + Twine(Discriminator + 1).str();
3323 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3325 } else {
3326 auto C =
3327 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3329 }
3330 }
3331 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3333}
3334
3335/// Emit a type description suitable for use by a runtime sanitizer library. The
3336/// format of a type descriptor is
3337///
3338/// \code
3339/// { i16 TypeKind, i16 TypeInfo }
3340/// \endcode
3341///
3342/// followed by an array of i8 containing the type name with extra information
3343/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3344/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3345/// anything else.
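///
/// For illustration only (an assumed encoding sketch, not emitted verbatim),
/// a 32-bit signed 'int' would yield a descriptor roughly like:
/// \code
///   { i16 0, i16 11, [6 x i8] c"'int'\00" } ; TK_Integer, (log2(32) << 1) | 1
/// \endcode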
3347 // Only emit each type's descriptor once.
3348 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3349 return C;
3350
3351 uint16_t TypeKind = TK_Unknown;
3352 uint16_t TypeInfo = 0;
3353 bool IsBitInt = false;
3354
3355 if (T->isIntegerType()) {
3356 TypeKind = TK_Integer;
3357 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3358 (T->isSignedIntegerType() ? 1 : 0);
3359 // Follow the suggestion from the discussion of issue 64100:
3360 // we can then write the exact number of bits in TypeName after the '\0',
3361 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3362 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3363 // Do a sanity check, as we are using a 32-bit type to store the bit length.
3364 assert(getContext().getTypeSize(T) > 0 &&
3365 " non positive amount of bits in __BitInt type");
3366 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3367 " too many bits in __BitInt type");
3368
3369 // Redefine TypeKind with the actual __BitInt type if we have signed
3370 // BitInt.
3371 TypeKind = TK_BitInt;
3372 IsBitInt = true;
3373 }
3374 } else if (T->isFloatingType()) {
3375 TypeKind = TK_Float;
3377 }
3378
3379 // Format the type name as if for a diagnostic, including quotes and
3380 // optionally an 'aka'.
3381 SmallString<32> Buffer;
3383 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3384 StringRef(), {}, Buffer, {});
3385
3386 if (IsBitInt) {
3387 // The structure is: a 0 to end the string, a 32-bit unsigned integer in
3388 // target endianness, then a zero.
3389 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3390 const auto *EIT = T->castAs<BitIntType>();
3391 uint32_t Bits = EIT->getNumBits();
3392 llvm::support::endian::write32(S + 1, Bits,
3393 getTarget().isBigEndian()
3394 ? llvm::endianness::big
3395 : llvm::endianness::little);
3396 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3397 Buffer.append(Str);
3398 }
3399
3400 llvm::Constant *Components[] = {
3401 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3402 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3403 };
3404 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3405
3406 auto *GV = new llvm::GlobalVariable(
3407 CGM.getModule(), Descriptor->getType(),
3408 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3409 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3411
3412 // Remember the descriptor for this type.
3414
3415 return GV;
3416}
3417
3418llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3419 llvm::Type *TargetTy = IntPtrTy;
3420
3421 if (V->getType() == TargetTy)
3422 return V;
3423
3424 // Floating-point types which fit into intptr_t are bitcast to integers
3425 // and then passed directly (after zero-extension, if necessary).
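  // For example (illustrative), a 'float' on a 64-bit target is bitcast to
  // i32 here and then zero-extended to i64 by the integer path below.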
3426 if (V->getType()->isFloatingPointTy()) {
3427 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3428 if (Bits <= TargetTy->getIntegerBitWidth())
3429 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3430 Bits));
3431 }
3432
3433 // Integers which fit in intptr_t are zero-extended and passed directly.
3434 if (V->getType()->isIntegerTy() &&
3435 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3436 return Builder.CreateZExt(V, TargetTy);
3437
3438 // Pointers are passed directly, everything else is passed by address.
3439 if (!V->getType()->isPointerTy()) {
3440 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3441 Builder.CreateStore(V, Ptr);
3442 V = Ptr.getPointer();
3443 }
3444 return Builder.CreatePtrToInt(V, TargetTy);
3445}
3446
3447/// Emit a representation of a SourceLocation for passing to a handler
3448/// in a sanitizer runtime library. The format for this data is:
3449/// \code
3450/// struct SourceLocation {
3451/// const char *Filename;
3452/// int32_t Line, Column;
3453/// };
3454/// \endcode
3455/// For an invalid SourceLocation, the Filename pointer is null.
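///
/// For illustration only (an assumed example, not emitted verbatim), a check
/// at line 42, column 7 of "a.c" would be encoded roughly as:
/// \code
///   { ptr @.src, i32 42, i32 7 }  ; where @.src holds c"a.c\00"
/// \endcode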
3457 llvm::Constant *Filename;
3458 int Line, Column;
3459
3461 if (PLoc.isValid()) {
3462 StringRef FilenameString = PLoc.getFilename();
3463
3464 int PathComponentsToStrip =
3465 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
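    // A positive value strips that many leading path components; a negative
    // value keeps only that many trailing components. For example
    // (illustrative), with "a/b/c/d.c": 1 gives "b/c/d.c", -2 gives "c/d.c".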
3466 if (PathComponentsToStrip < 0) {
3467 assert(PathComponentsToStrip != INT_MIN);
3468 int PathComponentsToKeep = -PathComponentsToStrip;
3469 auto I = llvm::sys::path::rbegin(FilenameString);
3470 auto E = llvm::sys::path::rend(FilenameString);
3471 while (I != E && --PathComponentsToKeep)
3472 ++I;
3473
3474 FilenameString = FilenameString.substr(I - E);
3475 } else if (PathComponentsToStrip > 0) {
3476 auto I = llvm::sys::path::begin(FilenameString);
3477 auto E = llvm::sys::path::end(FilenameString);
3478 while (I != E && PathComponentsToStrip--)
3479 ++I;
3480
3481 if (I != E)
3482 FilenameString =
3483 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3484 else
3485 FilenameString = llvm::sys::path::filename(FilenameString);
3486 }
3487
3488 auto FilenameGV =
3489 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3491 cast<llvm::GlobalVariable>(
3492 FilenameGV.getPointer()->stripPointerCasts()));
3493 Filename = FilenameGV.getPointer();
3494 Line = PLoc.getLine();
3495 Column = PLoc.getColumn();
3496 } else {
3497 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3498 Line = Column = 0;
3499 }
3500
3501 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3502 Builder.getInt32(Column)};
3503
3504 return llvm::ConstantStruct::getAnon(Data);
3505}
3506
3507namespace {
3508/// Specify under what conditions this check can be recovered
3509enum class CheckRecoverableKind {
3510 /// Always terminate program execution if this check fails.
3512 /// Check supports recovering, runtime has both fatal (noreturn) and
3513 /// non-fatal handlers for this check.
3514 Recoverable,
3516 /// The runtime conditionally aborts, so recovery must always be supported.
3517};
3518}
3519
3520static CheckRecoverableKind
3522 if (Ordinal == SanitizerKind::SO_Vptr)
3523 return CheckRecoverableKind::AlwaysRecoverable;
3524 else if (Ordinal == SanitizerKind::SO_Return ||
3525 Ordinal == SanitizerKind::SO_Unreachable)
3526 return CheckRecoverableKind::Unrecoverable;
3527 else
3528 return CheckRecoverableKind::Recoverable;
3529}
3530
3531namespace {
3532struct SanitizerHandlerInfo {
3533 char const *const Name;
3534 unsigned Version;
3535};
3536}
3537
3538const SanitizerHandlerInfo SanitizerHandlers[] = {
3539#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3541#undef SANITIZER_CHECK
3542};
3543
3545 llvm::FunctionType *FnType,
3547 SanitizerHandler CheckHandler,
3548 CheckRecoverableKind RecoverKind, bool IsFatal,
3549 llvm::BasicBlock *ContBB, bool NoMerge) {
3550 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3551 std::optional<ApplyDebugLocation> DL;
3552 if (!CGF.Builder.getCurrentDebugLocation()) {
3553 // Ensure that the call has at least an artificial debug location.
3554 DL.emplace(CGF, SourceLocation());
3555 }
3556 bool NeedsAbortSuffix =
3557 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3558 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3559 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3560 const StringRef CheckName = CheckInfo.Name;
3561 std::string FnName = "__ubsan_handle_" + CheckName.str();
3562 if (CheckInfo.Version && !MinimalRuntime)
3563 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3564 if (MinimalRuntime)
3565 FnName += "_minimal";
3566 if (NeedsAbortSuffix)
3567 FnName += "_abort";
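  // For example (illustrative), a fatal but recoverable "type_mismatch" check
  // with Version 1 and the full runtime is reported via
  // "__ubsan_handle_type_mismatch_v1_abort".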
3568 bool MayReturn =
3569 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3570
3571 llvm::AttrBuilder B(CGF.getLLVMContext());
3572 if (!MayReturn) {
3573 B.addAttribute(llvm::Attribute::NoReturn)
3574 .addAttribute(llvm::Attribute::NoUnwind);
3575 }
3576 B.addUWTableAttr(llvm::UWTableKind::Default);
3577
3578 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3579 FnType, FnName,
3580 llvm::AttributeList::get(CGF.getLLVMContext(),
3581 llvm::AttributeList::FunctionIndex, B),
3582 /*Local=*/true);
3583 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3584 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3585 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3586 if (NoMerge)
3587 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3588 if (!MayReturn) {
3589 HandlerCall->setDoesNotReturn();
3590 CGF.Builder.CreateUnreachable();
3591 } else {
3592 CGF.Builder.CreateBr(ContBB);
3593 }
3594}
3595
3597 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3598 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3599 ArrayRef<llvm::Value *> DynamicArgs) {
3600 assert(IsSanitizerScope);
3601 assert(Checked.size() > 0);
3602 assert(CheckHandler >= 0 &&
3603 size_t(CheckHandler) < std::size(SanitizerHandlers));
3604 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3605
3606 llvm::Value *FatalCond = nullptr;
3607 llvm::Value *RecoverableCond = nullptr;
3608 llvm::Value *TrapCond = nullptr;
3609 bool NoMerge = false;
3610 for (auto &[Check, Ord] : Checked) {
3611 // -fsanitize-trap= overrides -fsanitize-recover=.
3612 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
3614 ? RecoverableCond
3615 : FatalCond;
3616 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3617
3619 NoMerge = true;
3620 }
3621
3623 llvm::Value *Allow =
3624 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3625 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3626
3627 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3628 if (*Cond)
3629 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3630 }
3631 }
3632
3633 if (TrapCond)
3634 EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
3635 if (!FatalCond && !RecoverableCond)
3636 return;
3637
3638 llvm::Value *JointCond;
3639 if (FatalCond && RecoverableCond)
3640 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3641 else
3642 JointCond = FatalCond ? FatalCond : RecoverableCond;
3643 assert(JointCond);
3644
3645 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3646 assert(SanOpts.has(Checked[0].second));
3647#ifndef NDEBUG
3648 for (int i = 1, n = Checked.size(); i < n; ++i) {
3649 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3650 "All recoverable kinds in a single check must be same!");
3651 assert(SanOpts.has(Checked[i].second));
3652 }
3653#endif
3654
3655 llvm::BasicBlock *Cont = createBasicBlock("cont");
3656 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3657 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3658 // Give a hint that we very much don't expect to execute the handler.
3659 llvm::MDBuilder MDHelper(getLLVMContext());
3660 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3661 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3662 EmitBlock(Handlers);
3663
3664 // Handler functions take an i8* pointing to the (handler-specific) static
3665 // information block, followed by a sequence of intptr_t arguments
3666 // representing operand values.
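  // Illustratively (an assumed shape, not a declaration used here):
  //   void __ubsan_handle_<check>(<check>Data *StaticData, uintptr_t Arg0, ...);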
3669 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3670 Args.reserve(DynamicArgs.size() + 1);
3671 ArgTypes.reserve(DynamicArgs.size() + 1);
3672
3673 // Emit handler arguments and create handler function type.
3674 if (!StaticArgs.empty()) {
3675 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3676 auto *InfoPtr = new llvm::GlobalVariable(
3677 CGM.getModule(), Info->getType(), false,
3678 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3679 llvm::GlobalVariable::NotThreadLocal,
3680 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3681 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3683 Args.push_back(InfoPtr);
3684 ArgTypes.push_back(Args.back()->getType());
3685 }
3686
3687 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3688 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3689 ArgTypes.push_back(IntPtrTy);
3690 }
3691 }
3692
3693 llvm::FunctionType *FnType =
3694 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3695
3696 if (!FatalCond || !RecoverableCond) {
3697 // Simple case: we need to generate a single handler call, either
3698 // fatal, or non-fatal.
3699 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3700 (FatalCond != nullptr), Cont, NoMerge);
3701 } else {
3702 // Emit two handler calls: the first one for the set of unrecoverable
3703 // checks, the second one for the recoverable checks.
3704 llvm::BasicBlock *NonFatalHandlerBB =
3705 createBasicBlock("non_fatal." + CheckName);
3706 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3707 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3708 EmitBlock(FatalHandlerBB);
3709 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3710 NonFatalHandlerBB, NoMerge);
3711 EmitBlock(NonFatalHandlerBB);
3712 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3713 Cont, NoMerge);
3714 }
3715
3716 EmitBlock(Cont);
3717}
3718
3720 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
3721 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
3722 ArrayRef<llvm::Constant *> StaticArgs) {
3723 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3724
3725 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3726 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3727
3728 llvm::MDBuilder MDHelper(getLLVMContext());
3729 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3730 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3731
3732 EmitBlock(CheckBB);
3733
3734 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
3735
3736 llvm::CallInst *CheckCall;
3737 llvm::FunctionCallee SlowPathFn;
3738 if (WithDiag) {
3739 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3740 auto *InfoPtr =
3741 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3742 llvm::GlobalVariable::PrivateLinkage, Info);
3743 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3745
3746 SlowPathFn = CGM.getModule().getOrInsertFunction(
3747 "__cfi_slowpath_diag",
3748 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3749 false));
3750 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3751 } else {
3752 SlowPathFn = CGM.getModule().getOrInsertFunction(
3753 "__cfi_slowpath",
3754 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3755 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3756 }
3757
3759 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3760 CheckCall->setDoesNotThrow();
3761
3762 EmitBlock(Cont);
3763}
3764
3765// Emit a stub for __cfi_check function so that the linker knows about this
3766// symbol in LTO mode.
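// The stub is roughly equivalent to (illustrative C-like sketch):
//   void __cfi_check(uint64_t CallSiteTypeId, void *Addr, void *FailData) {
//     __cfi_check_fail(FailData, Addr);
//   }
// so the CrossDSOCFI pass has executable code to rewrite during LTO.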
3768 llvm::Module *M = &CGM.getModule();
3769 ASTContext &C = getContext();
3770 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3771
3773 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3774 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3775 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3777 FnArgs.push_back(&ArgCallsiteTypeId);
3778 FnArgs.push_back(&ArgAddr);
3779 FnArgs.push_back(&ArgCFICheckFailData);
3780 const CGFunctionInfo &FI =
3782
3783 llvm::Function *F = llvm::Function::Create(
3784 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3785 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3786 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3788 F->setAlignment(llvm::Align(4096));
3789 CGM.setDSOLocal(F);
3790
3791 llvm::LLVMContext &Ctx = M->getContext();
3792 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3793 // CrossDSOCFI pass is not executed if there is no executable code.
3794 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3795 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3796 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3797}
3798
3799// This function is basically a switch over the CFI failure kind, which is
3800// extracted from CFICheckFailData (1st function argument). Each case is either
3801// llvm.trap or a call to one of the two runtime handlers, based on
3802// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3803// failure kind) traps, but this should really never happen. CFICheckFailData
3804// can be nullptr if the calling module has -fsanitize-trap behavior for this
3805// check kind; in this case __cfi_check_fail traps as well.
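// CFICheckFailData is laid out roughly like this (illustrative C view,
// matching CfiCheckFailDataTy below):
//   struct { uint8_t CheckKind; SourceLocation Loc; void *Type; };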
3807 SanitizerScope SanScope(this);
3808 FunctionArgList Args;
3813 Args.push_back(&ArgData);
3814 Args.push_back(&ArgAddr);
3815
3816 const CGFunctionInfo &FI =
3818
3819 llvm::Function *F = llvm::Function::Create(
3820 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3821 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3822
3823 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3825 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3826
3827 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3828 SourceLocation());
3829
3830 // This function is not affected by NoSanitizeList. This function does
3831 // not have a source location, but "src:*" would still apply. Revert any
3832 // changes to SanOpts made in StartFunction.
3834
3835 llvm::Value *Data =
3836 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3837 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3838 llvm::Value *Addr =
3839 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3840 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3841
3842 // Data == nullptr means the calling module has trap behavior for this check.
3843 llvm::Value *DataIsNotNullPtr =
3844 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3845 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3846
3847 llvm::StructType *SourceLocationTy =
3848 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3849 llvm::StructType *CfiCheckFailDataTy =
3850 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3851
3852 llvm::Value *V = Builder.CreateConstGEP2_32(
3853 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
3854
3855 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3856 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3857
3858 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3860 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3861 llvm::Value *ValidVtable = Builder.CreateZExt(
3862 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3863 {Addr, AllVtables}),
3864 IntPtrTy);
3865
3866 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
3867 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
3868 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
3869 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
3870 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
3871 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
3872
3874 Checks;
3875 for (auto CheckKindOrdinalPair : CheckKinds) {
3876 int Kind = CheckKindOrdinalPair.first;
3877 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
3878 llvm::Value *Cond =
3879 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3880 if (CGM.getLangOpts().Sanitize.has(Ordinal))
3881 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
3882 {}, {Data, Addr, ValidVtable});
3883 else
3884 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3885 }
3886
3888 // The only reference to this function will be created during LTO link.
3889 // Make sure it survives until then.
3890 CGM.addUsedGlobal(F);
3891}
3892
3894 if (SanOpts.has(SanitizerKind::Unreachable)) {
3895 SanitizerScope SanScope(this);
3896 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3897 SanitizerKind::SO_Unreachable),
3898 SanitizerHandler::BuiltinUnreachable,
3900 }
3901 Builder.CreateUnreachable();
3902}
3903
3904void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3905 SanitizerHandler CheckHandlerID,
3906 bool NoMerge) {
3907 llvm::BasicBlock *Cont = createBasicBlock("cont");
3908
3909 // If we're optimizing, collapse all calls to trap down to just one per
3910 // check-type per function to save on code size.
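  // For example (illustrative), two array-bounds failures in the same function
  // share a single "trap" block when optimizing, unless NoMerge applies.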
3911 if ((int)TrapBBs.size() <= CheckHandlerID)
3912 TrapBBs.resize(CheckHandlerID + 1);
3913
3914 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3915
3916 NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
3917 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3918
3919 if (TrapBB && !NoMerge) {
3920 auto Call = TrapBB->begin();
3921 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3922
3923 Call->applyMergedLocation(Call->getDebugLoc(),
3924 Builder.getCurrentDebugLocation());
3925 Builder.CreateCondBr(Checked, Cont, TrapBB);
3926 } else {
3927 TrapBB = createBasicBlock("trap");
3928 Builder.CreateCondBr(Checked, Cont, TrapBB);
3929 EmitBlock(TrapBB);
3930
3931 llvm::CallInst *TrapCall =
3932 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3933 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
3934
3935 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3936 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3938 TrapCall->addFnAttr(A);
3939 }
3940 if (NoMerge)
3941 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3942 TrapCall->setDoesNotReturn();
3943 TrapCall->setDoesNotThrow();
3944 Builder.CreateUnreachable();
3945 }
3946
3947 EmitBlock(Cont);
3948}
3949
3950llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3951 llvm::CallInst *TrapCall =
3952 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3953
3954 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3955 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3957 TrapCall->addFnAttr(A);
3958 }
3959
3961 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3962 return TrapCall;
3963}
3964
3966 LValueBaseInfo *BaseInfo,
3967 TBAAAccessInfo *TBAAInfo) {
3968 assert(E->getType()->isArrayType() &&
3969 "Array to pointer decay must have array source type!");
3970
3971 // Expressions of array type can't be bitfields or vector elements.
3972 LValue LV = EmitLValue(E);
3973 Address Addr = LV.getAddress();
3974
3975 // If the array type was an incomplete type, we need to make sure
3976 // the decay ends up being the right type.
3977 llvm::Type *NewTy = ConvertType(E->getType());
3978 Addr = Addr.withElementType(NewTy);
3979
3980 // Note that VLA pointers are always decayed, so we don't need to do
3981 // anything here.
3982 if (!E->getType()->isVariableArrayType()) {
3983 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3984 "Expected pointer to array");
3985 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3986 }
3987
3988 // The result of this decay conversion points to an array element within the
3989 // base lvalue. However, since TBAA currently does not support representing
3990 // accesses to elements of member arrays, we conservatively represent accesses
3991 // to the pointee object as if it had no base lvalue specified.
3992 // TODO: Support TBAA for member arrays.
3994 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3995 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3996
3997 return Addr.withElementType(ConvertTypeForMem(EltType));
3998}
3999
4000/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4001/// array to pointer, return the array subexpression.
4002static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4003 // If this isn't just an array->pointer decay, bail out.
4004 const auto *CE = dyn_cast<CastExpr>(E);
4005 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4006 return nullptr;
4007
4008 // If this is a decay from a variable-width array, bail out.
4009 const Expr *SubExpr = CE->getSubExpr();
4010 if (SubExpr->getType()->isVariableArrayType())
4011 return nullptr;
4012
4013 return SubExpr;
4014}
4015
4017 llvm::Type *elemType,
4018 llvm::Value *ptr,
4019 ArrayRef<llvm::Value*> indices,
4020 bool inbounds,
4021 bool signedIndices,
4022 SourceLocation loc,
4023 const llvm::Twine &name = "arrayidx") {
4024 if (inbounds) {
4025 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4027 name);
4028 } else {
4029 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4030 }
4031}
4032
4035 llvm::Type *elementType, bool inbounds,
4036 bool signedIndices, SourceLocation loc,
4037 CharUnits align,
4038 const llvm::Twine &name = "arrayidx") {
4039 if (inbounds) {
4040 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4042 align, name);
4043 } else {
4044 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4045 }
4046}
4047
4049 llvm::Value *idx,
4050 CharUnits eltSize) {
4051 // If we have a constant index, we can use the exact offset of the
4052 // element we're accessing.
4053 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
4054 CharUnits offset = constantIdx->getZExtValue() * eltSize;
4055 return arrayAlign.alignmentAtOffset(offset);
4056
4057 // Otherwise, use the worst-case alignment for any element.
4058 } else {
4059 return arrayAlign.alignmentOfArrayElement(eltSize);
4060 }
4061}
4062
4064 const VariableArrayType *vla) {
4065 QualType eltType;
4066 do {
4067 eltType = vla->getElementType();
4068 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4069 return eltType;
4070}
4071
4073 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4074}
4075
4076static bool hasBPFPreserveStaticOffset(const Expr *E) {
4077 if (!E)
4078 return false;
4079 QualType PointeeType = E->getType()->getPointeeType();
4080 if (PointeeType.isNull())
4081 return false;
4082 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4083 return hasBPFPreserveStaticOffset(BaseDecl);
4084 return false;
4085}
4086
4087// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4089 Address &Addr) {
4090 if (!CGF.getTarget().getTriple().isBPF())
4091 return Addr;
4092
4093 llvm::Function *Fn =
4094 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4095 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4096 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4097}
4098
4099/// Given an array base, check whether its member access belongs to a record
4100/// with preserve_access_index attribute or not.
4101static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4102 if (!ArrayBase || !CGF.getDebugInfo())
4103 return false;
4104
4105 // Only support base as either a MemberExpr or DeclRefExpr.
4106 // DeclRefExpr to cover cases like:
4107 // struct s { int a; int b[10]; };
4108 // struct s *p;
4109 // p[1].a
4110 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4111 // p->b[5] is a MemberExpr example.
4112 const Expr *E = ArrayBase->IgnoreImpCasts();
4113 if (const auto *ME = dyn_cast<MemberExpr>(E))
4114 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4115
4116 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4117 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4118 if (!VarDef)
4119 return false;
4120
4121 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4122 if (!PtrT)
4123 return false;
4124
4125 const auto *PointeeT = PtrT->getPointeeType()
4127 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4128 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4129 return false;
4130 }
4131
4132 return false;
4133}
4134
4137 QualType eltType, bool inbounds,
4138 bool signedIndices, SourceLocation loc,
4139 QualType *arrayType = nullptr,
4140 const Expr *Base = nullptr,
4141 const llvm::Twine &name = "arrayidx") {
4142 // All the indices except the last must be zero.
4143#ifndef NDEBUG
4144 for (auto *idx : indices.drop_back())
4145 assert(isa<llvm::ConstantInt>(idx) &&
4146 cast<llvm::ConstantInt>(idx)->isZero());
4147#endif
4148
4149 // Determine the element size of the statically-sized base. This is
4150 // the thing that the indices are expressed in terms of.
4151 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4152 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4153 }
4154
4155 // We can use that to compute the best alignment of the element.
4156 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4157 CharUnits eltAlign =
4158 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4159
4161 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4162
4163 llvm::Value *eltPtr;
4164 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4165 if (!LastIndex ||
4167 addr = emitArraySubscriptGEP(CGF, addr, indices,
4168 CGF.ConvertTypeForMem(eltType), inbounds,
4169 signedIndices, loc, eltAlign, name);
4170 return addr;
4171 } else {
4172 // Remember the original array subscript for bpf target
4173 unsigned idx = LastIndex->getZExtValue();
4174 llvm::DIType *DbgInfo = nullptr;
4175 if (arrayType)
4176 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4177 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4178 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4179 idx, DbgInfo);
4180 }
4181
4182 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4183}
4184
4185/// The offset of a field from the beginning of the record.
4187 const FieldDecl *Field, int64_t &Offset) {
4188 ASTContext &Ctx = CGF.getContext();
4189 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4190 unsigned FieldNo = 0;
4191
4192 for (const FieldDecl *FD : RD->fields()) {
4193 if (FD == Field) {
4194 Offset += Layout.getFieldOffset(FieldNo);
4195 return true;
4196 }
4197
4198 QualType Ty = FD->getType();
4199 if (Ty->isRecordType())
4200 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4201 Offset += Layout.getFieldOffset(FieldNo);
4202 return true;
4203 }
4204
4205 if (!RD->isUnion())
4206 ++FieldNo;
4207 }
4208
4209 return false;
4210}
4211
4212/// Returns the relative offset difference between \p FD1 and \p FD2.
4213/// \code
4214/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4215/// \endcode
4216/// Both fields must be within the same struct.
4217static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4218 const FieldDecl *FD1,
4219 const FieldDecl *FD2) {
4220 const RecordDecl *FD1OuterRec =
4222 const RecordDecl *FD2OuterRec =
4224
4225 if (FD1OuterRec != FD2OuterRec)
4226 // Fields must be within the same RecordDecl.
4227 return std::optional<int64_t>();
4228
4229 int64_t FD1Offset = 0;
4230 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4231 return std::optional<int64_t>();
4232
4233 int64_t FD2Offset = 0;
4234 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4235 return std::optional<int64_t>();
4236
4237 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4238}
4239
4241 bool Accessed) {
4242 // The index must always be an integer, which is not an aggregate. Emit it
4243 // in lexical order (this complexity is, sadly, required by C++17).
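  // For example (illustrative), in '0[a]' the index is the LHS of the
  // subscript and therefore has to be emitted before the base expression.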
4244 llvm::Value *IdxPre =
4245 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4246 bool SignedIndices = false;
4247 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4248 auto *Idx = IdxPre;
4249 if (E->getLHS() != E->getIdx()) {
4250 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4251 Idx = EmitScalarExpr(E->getIdx());
4252 }
4253
4254 QualType IdxTy = E->getIdx()->getType();
4255 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4256 SignedIndices |= IdxSigned;
4257
4258 if (SanOpts.has(SanitizerKind::ArrayBounds))
4259 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4260
4261 // Extend or truncate the index type to 32 or 64-bits.
4262 if (Promote && Idx->getType() != IntPtrTy)
4263 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4264
4265 return Idx;
4266 };
4267 IdxPre = nullptr;
4268
4269 // If the base is a vector type, then we are forming a vector element lvalue
4270 // with this subscript.
4271 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4272 !isa<ExtVectorElementExpr>(E->getBase())) {
4273 // Emit the vector as an lvalue to get its address.
4274 LValue LHS = EmitLValue(E->getBase());
4275 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4276 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4277 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4278 LHS.getBaseInfo(), TBAAAccessInfo());
4279 }
4280
4281 // All the other cases basically behave like simple offsetting.
4282
4283 // Handle the extvector case we ignored above.
4284 if (isa<ExtVectorElementExpr>(E->getBase())) {
4285 LValue LV = EmitLValue(E->getBase());
4286 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4288
4289 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4290 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4291 SignedIndices, E->getExprLoc());
4292 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4293 CGM.getTBAAInfoForSubobject(LV, EltType));
4294 }
4295
4296 LValueBaseInfo EltBaseInfo;
4297 TBAAAccessInfo EltTBAAInfo;
4298 Address Addr = Address::invalid();
4299 if (const VariableArrayType *vla =
4300 getContext().getAsVariableArrayType(E->getType())) {
4301 // The base must be a pointer, which is not an aggregate. Emit
4302 // it. It needs to be emitted first in case it's what captures
4303 // the VLA bounds.
4304 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4305 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4306
4307 // The element count here is the total number of non-VLA elements.
4308 llvm::Value *numElements = getVLASize(vla).NumElts;
4309
4310 // Effectively, the multiply by the VLA size is part of the GEP.
4311 // GEP indexes are signed, and scaling an index isn't permitted to
4312 // signed-overflow, so we use the same semantics for our explicit
4313 // multiply. We suppress this if overflow is not undefined behavior.
4314 if (getLangOpts().isSignedOverflowDefined()) {
4315 Idx = Builder.CreateMul(Idx, numElements);
4316 } else {
4317 Idx = Builder.CreateNSWMul(Idx, numElements);
4318 }
4319
4320 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4321 !getLangOpts().isSignedOverflowDefined(),
4322 SignedIndices, E->getExprLoc());
4323
4324 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4325 // Indexing over an interface, as in "NSString *P; P[4];"
4326
4327 // Emit the base pointer.
4328 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4329 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4330
4331 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4332 llvm::Value *InterfaceSizeVal =
4333 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4334
4335 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4336
4337 // We don't necessarily build correct LLVM struct types for ObjC
4338 // interfaces, so we can't rely on GEP to do this scaling
4339 // correctly; instead we need to cast to i8*. FIXME: is this actually
4340 // true? A lot of other things in the fragile ABI would break...
4341 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4342
4343 // Do the GEP.
4344 CharUnits EltAlign =
4345 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4346 llvm::Value *EltPtr =
4347 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4348 ScaledIdx, false, SignedIndices, E->getExprLoc());
4349 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4350 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4351 // If this is A[i] where A is an array, the frontend will have decayed the
4352 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4353 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4354 // "gep x, i" here. Emit one "gep A, 0, i".
4355 assert(Array->getType()->isArrayType() &&
4356 "Array to pointer decay must have array source type!");
4357 LValue ArrayLV;
4358 // For simple multidimensional array indexing, set the 'accessed' flag for
4359 // better bounds-checking of the base expression.
4360 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4361 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4362 else
4363 ArrayLV = EmitLValue(Array);
4364 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4365
4366 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4367 // If the array being accessed has a "counted_by" attribute, generate
4368 // bounds checking code. The "count" field is at the top level of the
4369 // struct or in an anonymous struct that is also at the top level. Future
4370 // expansions may allow the "count" to reside at any place in the struct,
4371 // but the value of "counted_by" will be a "simple" path to the count,
4372 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4373 // similar to emit the correct GEP.
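      // For example (illustrative):
      //   struct s { int count; int fam[] __attribute__((counted_by(count))); };
      // Accesses to "fam" are then bounds-checked against the loaded "count".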
4374 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4375 getLangOpts().getStrictFlexArraysLevel();
4376
4377 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4378 ME &&
4379 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4381 const FieldDecl *FAMDecl = cast<FieldDecl>(ME->getMemberDecl());
4382 if (const FieldDecl *CountFD = FAMDecl->findCountedByField()) {
4383 if (std::optional<int64_t> Diff =
4384 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4385 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4386
4387 // Create a GEP with a byte offset between the FAM and count and
4388 // use that to load the count value.
4390 ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
4391
4392 llvm::Type *CountTy = ConvertType(CountFD->getType());
4393 llvm::Value *Res = Builder.CreateInBoundsGEP(
4394 Int8Ty, Addr.emitRawPointer(*this),
4395 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4396 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4397 ".counted_by.load");
4398
4399 // Now emit the bounds checking.
4400 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4401 Array->getType(), Accessed);
4402 }
4403 }
4404 }
4405 }
4406
4407 // Propagate the alignment from the array itself to the result.
4408 QualType arrayType = Array->getType();
4409 Addr = emitArraySubscriptGEP(
4410 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4411 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4412 E->getExprLoc(), &arrayType, E->getBase());
4413 EltBaseInfo = ArrayLV.getBaseInfo();
4414 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4415 } else {
4416 // The base must be a pointer; emit it with an estimate of its alignment.
4417 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4418 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4419 QualType ptrType = E->getBase()->getType();
4420 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4421 !getLangOpts().isSignedOverflowDefined(),
4422 SignedIndices, E->getExprLoc(), &ptrType,
4423 E->getBase());
4424 }
4425
4426 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4427
4428 if (getLangOpts().ObjC &&
4429 getLangOpts().getGC() != LangOptions::NonGC) {
4432 }
4433 return LV;
4434}
4435
4436llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4437 llvm::Value *Idx = EmitScalarExpr(E);
4438 if (Idx->getType() == IntPtrTy)
4439 return Idx;
4440 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4441 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4442}
4443
4445 assert(
4446 !E->isIncomplete() &&
4447 "incomplete matrix subscript expressions should be rejected during Sema");
4448 LValue Base = EmitLValue(E->getBase());
4449
4450 // Extend or truncate the index type to 32 or 64-bits if needed.
4451 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
4452 llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
4453
4454 llvm::Value *NumRows = Builder.getIntN(
4455 RowIdx->getType()->getScalarSizeInBits(),
4456 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4457 llvm::Value *FinalIdx =
4458 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4459 return LValue::MakeMatrixElt(
4460 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4461 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4462}
4463
4465 LValueBaseInfo &BaseInfo,
4466 TBAAAccessInfo &TBAAInfo,
4467 QualType BaseTy, QualType ElTy,
4468 bool IsLowerBound) {
4469 LValue BaseLVal;
4470 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4471 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4472 if (BaseTy->isArrayType()) {
4473 Address Addr = BaseLVal.getAddress();
4474 BaseInfo = BaseLVal.getBaseInfo();
4475
4476 // If the array type was an incomplete type, we need to make sure
4477 // the decay ends up being the right type.
4478 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4479 Addr = Addr.withElementType(NewTy);
4480
4481 // Note that VLA pointers are always decayed, so we don't need to do
4482 // anything here.
4483 if (!BaseTy->isVariableArrayType()) {
4484 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4485 "Expected pointer to array");
4486 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4487 }
4488
4489 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4490 }
4491 LValueBaseInfo TypeBaseInfo;
4492 TBAAAccessInfo TypeTBAAInfo;
4493 CharUnits Align =
4494 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4495 BaseInfo.mergeForCast(TypeBaseInfo);
4496 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4497 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4498 CGF.ConvertTypeForMem(ElTy), Align);
4499 }
4500 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4501}
4502
4504 bool IsLowerBound) {
4505
4506 assert(!E->isOpenACCArraySection() &&
4507 "OpenACC Array section codegen not implemented");
4508
4510 QualType ResultExprTy;
4511 if (auto *AT = getContext().getAsArrayType(BaseTy))
4512 ResultExprTy = AT->getElementType();
4513 else
4514 ResultExprTy = BaseTy->getPointeeType();
4515 llvm::Value *Idx = nullptr;
4516 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4517 // Requesting the lower bound or the upper bound, but with no length
4518 // provided and no ':' symbol for the default length -> length = 1.
4519 // Idx = LowerBound ?: 0;
4520 if (auto *LowerBound = E->getLowerBound()) {
4521 Idx = Builder.CreateIntCast(
4522 EmitScalarExpr(LowerBound), IntPtrTy,
4523 LowerBound->getType()->hasSignedIntegerRepresentation());
4524 } else
4525 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4526 } else {
4527 // Try to emit the length or the lower bound as a constant. If this is
4528 // possible, 1 is subtracted from the constant length or lower bound.
4529 // Otherwise, emit LLVM IR computing (LB + Len) - 1.
4530 auto &C = CGM.getContext();
4531 auto *Length = E->getLength();
4532 llvm::APSInt ConstLength;
4533 if (Length) {
4534 // Idx = LowerBound + Length - 1;
4535 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4536 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4537 Length = nullptr;
4538 }
4539 auto *LowerBound = E->getLowerBound();
4540 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4541 if (LowerBound) {
4542 if (std::optional<llvm::APSInt> LB =
4543 LowerBound->getIntegerConstantExpr(C)) {
4544 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4545 LowerBound = nullptr;
4546 }
4547 }
4548 if (!Length)
4549 --ConstLength;
4550 else if (!LowerBound)
4551 --ConstLowerBound;
4552
4553 if (Length || LowerBound) {
4554 auto *LowerBoundVal =
4555 LowerBound
4556 ? Builder.CreateIntCast(
4557 EmitScalarExpr(LowerBound), IntPtrTy,
4558 LowerBound->getType()->hasSignedIntegerRepresentation())
4559 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4560 auto *LengthVal =
4561 Length
4562 ? Builder.CreateIntCast(
4563 EmitScalarExpr(Length), IntPtrTy,
4564 Length->getType()->hasSignedIntegerRepresentation())
4565 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4566 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4567 /*HasNUW=*/false,
4568 !getLangOpts().isSignedOverflowDefined());
4569 if (Length && LowerBound) {
4570 Idx = Builder.CreateSub(
4571 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4572 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4573 }
4574 } else
4575 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4576 } else {
4577 // Idx = ArraySize - 1;
4578 QualType ArrayTy = BaseTy->isPointerType()
4579 ? E->getBase()->IgnoreParenImpCasts()->getType()
4580 : BaseTy;
4581 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4582 Length = VAT->getSizeExpr();
4583 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4584 ConstLength = *L;
4585 Length = nullptr;
4586 }
4587 } else {
4588 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4589 assert(CAT && "unexpected type for array initializer");
4590 ConstLength = CAT->getSize();
4591 }
4592 if (Length) {
4593 auto *LengthVal = Builder.CreateIntCast(
4594 EmitScalarExpr(Length), IntPtrTy,
4595 Length->getType()->hasSignedIntegerRepresentation());
4596 Idx = Builder.CreateSub(
4597 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4598 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4599 } else {
4600 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4601 --ConstLength;
4602 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4603 }
4604 }
4605 }
4606 assert(Idx);
4607
4608 Address EltPtr = Address::invalid();
4609 LValueBaseInfo BaseInfo;
4610 TBAAAccessInfo TBAAInfo;
4611 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4612 // The base must be a pointer, which is not an aggregate. Emit
4613 // it. It needs to be emitted first in case it's what captures
4614 // the VLA bounds.
4615 Address Base =
4616 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4617 BaseTy, VLA->getElementType(), IsLowerBound);
4618 // The element count here is the total number of non-VLA elements.
4619 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4620
4621 // Effectively, the multiply by the VLA size is part of the GEP.
4622 // GEP indexes are signed, and scaling an index isn't permitted to
4623 // signed-overflow, so we use the same semantics for our explicit
4624 // multiply. We suppress this if overflow is not undefined behavior.
4625 if (getLangOpts().isSignedOverflowDefined())
4626 Idx = Builder.CreateMul(Idx, NumElements);
4627 else
4628 Idx = Builder.CreateNSWMul(Idx, NumElements);
4629 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4630 !getLangOpts().isSignedOverflowDefined(),
4631 /*signedIndices=*/false, E->getExprLoc());
4632 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4633 // If this is A[i] where A is an array, the frontend will have decayed the
4634 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4635 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4636 // "gep x, i" here. Emit one "gep A, 0, i".
4637 assert(Array->getType()->isArrayType() &&
4638 "Array to pointer decay must have array source type!");
4639 LValue ArrayLV;
4640 // For simple multidimensional array indexing, set the 'accessed' flag for
4641 // better bounds-checking of the base expression.
4642 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4643 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4644 else
4645 ArrayLV = EmitLValue(Array);
4646
4647 // Propagate the alignment from the array itself to the result.
4648 EltPtr = emitArraySubscriptGEP(
4649 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4650 ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4651 /*signedIndices=*/false, E->getExprLoc());
4652 BaseInfo = ArrayLV.getBaseInfo();
4653 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4654 } else {
4655 Address Base =
4656 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4657 ResultExprTy, IsLowerBound);
4658 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4659 !getLangOpts().isSignedOverflowDefined(),
4660 /*signedIndices=*/false, E->getExprLoc());
4661 }
4662
4663 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4664}
4665
4668 // Emit the base vector as an l-value.
4669 LValue Base;
4670
4671 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4672 if (E->isArrow()) {
4673 // If it is a pointer to a vector, emit the address and form an lvalue with
4674 // it.
4675 LValueBaseInfo BaseInfo;
4676 TBAAAccessInfo TBAAInfo;
4677 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4678 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4679 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4680 Base.getQuals().removeObjCGCAttr();
4681 } else if (E->getBase()->isGLValue()) {
4682 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4683 // emit the base as an lvalue.
4684 assert(E->getBase()->getType()->isVectorType());
4685 Base = EmitLValue(E->getBase());
4686 } else {
4687 // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
4688 assert(E->getBase()->getType()->isVectorType() &&
4689 "Result must be a vector");
4690 llvm::Value *Vec = EmitScalarExpr(E->getBase());
4691
4692 // Store the vector to memory (because LValue wants an address).
4693 Address VecMem = CreateMemTemp(E->getBase()->getType());
4694 Builder.CreateStore(Vec, VecMem);
4695 Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4697 }
4698
4699 QualType type =
4700 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4701
4702 // Encode the element access list into a vector of unsigned indices.
4704 E->getEncodedElementAccess(Indices);
4705
4706 if (Base.isSimple()) {
4707 llvm::Constant *CV =
4708 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4709 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
4710 Base.getBaseInfo(), TBAAAccessInfo());
4711 }
4712 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4713
4714 llvm::Constant *BaseElts = Base.getExtVectorElts();
4716
4717 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4718 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4719 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4720 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4721 Base.getBaseInfo(), TBAAAccessInfo());
4722}
4723
4726 EmitIgnoredExpr(E->getBase());
4727 return EmitDeclRefLValue(DRE);
4728 }
4729
4730 Expr *BaseExpr = E->getBase();
4731 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4732 LValue BaseLV;
4733 if (E->isArrow()) {
4734 LValueBaseInfo BaseInfo;
4735 TBAAAccessInfo TBAAInfo;
4736 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4737 QualType PtrTy = BaseExpr->getType()->getPointeeType();
4738 SanitizerSet SkippedChecks;
4739 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4740 if (IsBaseCXXThis)
4741 SkippedChecks.set(SanitizerKind::Alignment, true);
4742 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4743 SkippedChecks.set(SanitizerKind::Null, true);
4744 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4745 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4746 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4747 } else
4748 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4749
4750 NamedDecl *ND = E->getMemberDecl();
4751 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4752 LValue LV = EmitLValueForField(BaseLV, Field);
4754 if (getLangOpts().OpenMP) {
4755 // If the member was explicitly marked as nontemporal, mark it as
4756 // nontemporal. If the base lvalue is marked as nontemporal, mark access
4757 // to children as nontemporal too.
4758 if ((IsWrappedCXXThis(BaseExpr) &&
4760 BaseLV.isNontemporal())
4761 LV.setNontemporal(/*Value=*/true);
4762 }
4763 return LV;
4764 }
4765
4766 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4767 return EmitFunctionDeclLValue(*this, E, FD);
4768
4769 llvm_unreachable("Unhandled member declaration!");
4770}
4771
4772/// Given that we are currently emitting a lambda, emit an l-value for
4773/// one of its members.
4774///
4776 llvm::Value *ThisValue) {
4777 bool HasExplicitObjectParameter = false;
4778 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
4779 if (MD) {
4780 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4781 assert(MD->getParent()->isLambda());
4782 assert(MD->getParent() == Field->getParent());
4783 }
4784 LValue LambdaLV;
4785 if (HasExplicitObjectParameter) {
4786 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4787 auto It = LocalDeclMap.find(D);
4788 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4789 Address AddrOfExplicitObject = It->getSecond();
4790 if (D->getType()->isReferenceType())
4791 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4793 else
4794 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
4795 D->getType().getNonReferenceType());
4796
4797 // Make sure we have an lvalue to the lambda itself and not a derived class.
4798 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
4799 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
4800 if (ThisTy != LambdaTy) {
4801 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
4803 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
4804 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
4805 LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
4806 }
4807 } else {
4808 QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4809 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4810 }
4811 return EmitLValueForField(LambdaLV, Field);
4812}
4813
4815 return EmitLValueForLambdaField(Field, CXXABIThisValue);
4816}
4817
4818/// Get the field index in the debug info. The debug info structure/union
4819/// will ignore the unnamed bitfields.
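/// For example (illustrative), in 'struct S { int a; int : 4; int b; };' the
/// AST field index of 'b' is 2 but its debug-info field index is 1.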
4821 unsigned FieldIndex) {
4822 unsigned I = 0, Skipped = 0;
4823
4824 for (auto *F : Rec->getDefinition()->fields()) {
4825 if (I == FieldIndex)
4826 break;
4827 if (F->isUnnamedBitField())
4828 Skipped++;
4829 I++;
4830 }
4831
4832 return FieldIndex - Skipped;
4833}
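For illustration only (not part of the file): given a record like the one below, the unnamed bit-field occupies an AST field slot but is skipped by the debug info, so getDebugInfoFIndex maps field 'b' from AST index 2 to debug-info index 1:

  // Hypothetical example record.
  struct Packed {
    unsigned a : 3;
    unsigned   : 5;   // unnamed bit-field, ignored by the debug info
    unsigned b : 8;   // AST field index 2, debug-info field index 1
  };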
4834
4835/// Get the address of a zero-sized field within a record. The resulting
4836/// address doesn't necessarily have the right type.
4838 const FieldDecl *Field) {
4840 CGF.getContext().getFieldOffset(Field));
4841 if (Offset.isZero())
4842 return Base;
4843 Base = Base.withElementType(CGF.Int8Ty);
4844 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4845}
4846
4847/// Drill down to the storage of a field without walking into
4848/// reference types.
4849///
4850/// The resulting address doesn't necessarily have the right type.
4852 const FieldDecl *field) {
4853 if (isEmptyFieldForLayout(CGF.getContext(), field))
4854 return emitAddrOfZeroSizeField(CGF, base, field);
4855
4856 const RecordDecl *rec = field->getParent();
4857
4858 unsigned idx =
4859 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4860
4861 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4862}
4863
4865 Address addr, const FieldDecl *field) {
4866 const RecordDecl *rec = field->getParent();
4867 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4868 base.getType(), rec->getLocation());
4869
4870 unsigned idx =
4871 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4872
4874 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4875}
4876
4877static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4878 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4879 if (!RD)
4880 return false;
4881
4882 if (RD->isDynamicClass())
4883 return true;
4884
4885 for (const auto &Base : RD->bases())
4886 if (hasAnyVptr(Base.getType(), Context))
4887 return true;
4888
4889 for (const FieldDecl *Field : RD->fields())
4890 if (hasAnyVptr(Field->getType(), Context))
4891 return true;
4892
4893 return false;
4894}
4895
4897 const FieldDecl *field) {
4898 LValueBaseInfo BaseInfo = base.getBaseInfo();
4899
4900 if (field->isBitField()) {
4901 const CGRecordLayout &RL =
4903 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4904 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4905 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4906 Info.VolatileStorageSize != 0 &&
4907 field->getType()
4910 Address Addr = base.getAddress();
4911 unsigned Idx = RL.getLLVMFieldNo(field);
4912 const RecordDecl *rec = field->getParent();
4914 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4915 if (!UseVolatile) {
4916 if (!IsInPreservedAIRegion &&
4917 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4918 if (Idx != 0)
4919 // For structs, we GEP to the field that the record layout suggests.
4920 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4921 } else {
4922 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4923 getContext().getRecordType(rec), rec->getLocation());
4925 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4926 DbgInfo);
4927 }
4928 }
4929 const unsigned SS =
4930 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4931 // Get the access type.
4932 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4933 Addr = Addr.withElementType(FieldIntTy);
4934 if (UseVolatile) {
4935 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4936 if (VolatileOffset)
4937 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4938 }
4939
4940 QualType fieldType =
4941 field->getType().withCVRQualifiers(base.getVRQualifiers());
4942 // TODO: Support TBAA for bit fields.
4943 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4944 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4945 TBAAAccessInfo());
4946 }
4947
4948 // Fields of may-alias structures are may-alias themselves.
4949 // FIXME: this should get propagated down through anonymous structs
4950 // and unions.
4951 QualType FieldType = field->getType();
4952 const RecordDecl *rec = field->getParent();
4953 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4954 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4955 TBAAAccessInfo FieldTBAAInfo;
4956 if (base.getTBAAInfo().isMayAlias() ||
4957 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4958 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4959 } else if (rec->isUnion()) {
4960 // TODO: Support TBAA for unions.
4961 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4962 } else {
 4963 // If no base type has been assigned for the base access, then try to generate
4964 // one for this base lvalue.
4965 FieldTBAAInfo = base.getTBAAInfo();
4966 if (!FieldTBAAInfo.BaseType) {
4967 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4968 assert(!FieldTBAAInfo.Offset &&
4969 "Nonzero offset for an access with no base type!");
4970 }
4971
4972 // Adjust offset to be relative to the base type.
4973 const ASTRecordLayout &Layout =
4975 unsigned CharWidth = getContext().getCharWidth();
4976 if (FieldTBAAInfo.BaseType)
4977 FieldTBAAInfo.Offset +=
4978 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4979
4980 // Update the final access type and size.
4981 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4982 FieldTBAAInfo.Size =
4984 }
4985
4986 Address addr = base.getAddress();
4988 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4989 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4990 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4991 ClassDef->isDynamicClass()) {
 4992 // Getting to any field of a dynamic object requires stripping the dynamic
 4993 // information provided by invariant.group. This is because accessing
 4994 // fields may leak the real address of the dynamic object, which could result
 4995 // in miscompilation when the leaked pointer is compared.
4996 auto *stripped =
4998 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
4999 }
5000 }
5001
5002 unsigned RecordCVR = base.getVRQualifiers();
5003 if (rec->isUnion()) {
5004 // For unions, there is no pointer adjustment.
5005 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5006 hasAnyVptr(FieldType, getContext()))
5007 // Because unions can easily skip invariant.barriers, we need to add
 5008 // a barrier every time a CXXRecord field with a vptr is referenced.
5010
5012 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5013 // Remember the original union field index
5014 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5015 rec->getLocation());
5016 addr =
5018 addr.emitRawPointer(*this),
5019 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5020 addr.getElementType(), addr.getAlignment());
5021 }
5022
5023 if (FieldType->isReferenceType())
5024 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5025 } else {
5026 if (!IsInPreservedAIRegion &&
5027 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5028 // For structs, we GEP to the field that the record layout suggests.
5029 addr = emitAddrOfFieldStorage(*this, addr, field);
5030 else
5031 // Remember the original struct field index
5032 addr = emitPreserveStructAccess(*this, base, addr, field);
5033 }
5034
5035 // If this is a reference field, load the reference right now.
5036 if (FieldType->isReferenceType()) {
5037 LValue RefLVal =
5038 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5039 if (RecordCVR & Qualifiers::Volatile)
5040 RefLVal.getQuals().addVolatile();
5041 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5042
5043 // Qualifiers on the struct don't apply to the referencee.
5044 RecordCVR = 0;
5045 FieldType = FieldType->getPointeeType();
5046 }
5047
5048 // Make sure that the address is pointing to the right type. This is critical
5049 // for both unions and structs.
5050 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5051
5052 if (field->hasAttr<AnnotateAttr>())
5053 addr = EmitFieldAnnotations(field, addr);
5054
5055 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5056 LV.getQuals().addCVRQualifiers(RecordCVR);
5057
5058 // __weak attribute on a field is ignored.
5061
5062 return LV;
5063}
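A brief sketch (not part of CGExpr.cpp) of two of the paths above: a bit-field member, which is emitted as a bit-field LValue over its storage unit, and a union member, which gets may-alias TBAA and no pointer adjustment:

  // Hypothetical user code.
  struct Flags { unsigned ready : 1; unsigned mode : 3; };
  union Word { int i; float f; };

  int use(Flags &fl, Word &w) {
    fl.mode = 5;   // bit-field path: read-modify-write of the packed storage unit
    w.i = 2;       // union member path: same address as the union, may-alias TBAA
    return fl.ready + w.i;
  }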
5064
5065LValue
5067 const FieldDecl *Field) {
5068 QualType FieldType = Field->getType();
5069
5070 if (!FieldType->isReferenceType())
5071 return EmitLValueForField(Base, Field);
5072
5073 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
5074
5075 // Make sure that the address is pointing to the right type.
5076 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5077 V = V.withElementType(llvmType);
5078
5079 // TODO: Generate TBAA information that describes this access as a structure
5080 // member access and not just an access to an object of the field's type. This
5081 // should be similar to what we do in EmitLValueForField().
5082 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5083 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5084 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5085 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5086 CGM.getTBAAInfoForSubobject(Base, FieldType));
5087}
5088
5090 if (E->isFileScope()) {
5092 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5093 }
5095 // make sure to emit the VLA size.
5097
5098 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5099 const Expr *InitExpr = E->getInitializer();
5101
5102 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5103 /*Init*/ true);
5104
5105 // Block-scope compound literals are destroyed at the end of the enclosing
5106 // scope in C.
5107 if (!getLangOpts().CPlusPlus)
5110 E->getType(), getDestroyer(DtorKind),
5111 DtorKind & EHCleanup);
5112
5113 return Result;
5114}
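To illustrate (a C example, not part of the source): a compound literal at file scope takes the file-scope branch and is emitted as a global, while one inside a function is materialized in a memory temporary whose destruction is pushed at the end of the enclosing scope:

  /* Hypothetical C translation unit. */
  int *gp = (int[]){1, 2, 3};      /* file scope: emitted as a global */

  int sum(void) {
    int *p = (int[]){4, 5, 6};     /* block scope: CreateMemTemp + EmitAnyExprToMem */
    return p[0] + p[1] + p[2];
  }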
5115
5117 if (!E->isGLValue())
5118 // Initializing an aggregate temporary in C++11: T{...}.
5119 return EmitAggExprToLValue(E);
5120
5121 // An lvalue initializer list must be initializing a reference.
5122 assert(E->isTransparent() && "non-transparent glvalue init list");
5123 return EmitLValue(E->getInit(0));
5124}
5125
5126/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5127/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5128/// LValue is returned and the current block has been terminated.
5129static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5130 const Expr *Operand) {
5131 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5132 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5133 return std::nullopt;
5134 }
5135
5136 return CGF.EmitLValue(Operand);
5137}
5138
5139namespace {
5140// Handle the case where the condition is a constant-evaluatable simple integer,
5141// which means we don't have to separately handle the true/false blocks.
5142std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5144 const Expr *condExpr = E->getCond();
5145 bool CondExprBool;
5146 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5147 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5148 if (!CondExprBool)
5149 std::swap(Live, Dead);
5150
5151 if (!CGF.ContainsLabel(Dead)) {
5152 // If the true case is live, we need to track its region.
5153 if (CondExprBool)
5155 CGF.markStmtMaybeUsed(Dead);
 5156 // If it is a throw expression, we emit it and return an undefined lvalue
5157 // because it can't be used.
5158 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5159 CGF.EmitCXXThrowExpr(ThrowExpr);
5160 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5161 llvm::Type *Ty = CGF.UnqualPtrTy;
5162 return CGF.MakeAddrLValue(
5163 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5164 Dead->getType());
5165 }
5166 return CGF.EmitLValue(Live);
5167 }
5168 }
5169 return std::nullopt;
5170}
5171struct ConditionalInfo {
5172 llvm::BasicBlock *lhsBlock, *rhsBlock;
5173 std::optional<LValue> LHS, RHS;
5174};
5175
5176// Create and generate the 3 blocks for a conditional operator.
5177// Leaves the 'current block' in the continuation basic block.
5178template<typename FuncTy>
5179ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5181 const FuncTy &BranchGenFunc) {
5182 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5183 CGF.createBasicBlock("cond.false"), std::nullopt,
5184 std::nullopt};
5185 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5186
5187 CodeGenFunction::ConditionalEvaluation eval(CGF);
5188 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5189 CGF.getProfileCount(E));
5190
5191 // Any temporaries created here are conditional.
5192 CGF.EmitBlock(Info.lhsBlock);
5194 eval.begin(CGF);
5195 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5196 eval.end(CGF);
5197 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5198
5199 if (Info.LHS)
5200 CGF.Builder.CreateBr(endBlock);
5201
5202 // Any temporaries created here are conditional.
5203 CGF.EmitBlock(Info.rhsBlock);
5204 eval.begin(CGF);
5205 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5206 eval.end(CGF);
5207 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5208 CGF.EmitBlock(endBlock);
5209
5210 return Info;
5211}
5212} // namespace
5213
5216 if (!E->isGLValue()) {
5217 // ?: here should be an aggregate.
5219 "Unexpected conditional operator!");
5220 return (void)EmitAggExprToLValue(E);
5221 }
5222
5223 OpaqueValueMapping binding(*this, E);
5224 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5225 return;
5226
5227 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5228 CGF.EmitIgnoredExpr(E);
5229 return LValue{};
5230 });
5231}
5234 if (!expr->isGLValue()) {
5235 // ?: here should be an aggregate.
5236 assert(hasAggregateEvaluationKind(expr->getType()) &&
5237 "Unexpected conditional operator!");
5238 return EmitAggExprToLValue(expr);
5239 }
5240
5241 OpaqueValueMapping binding(*this, expr);
5242 if (std::optional<LValue> Res =
5243 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5244 return *Res;
5245
5246 ConditionalInfo Info = EmitConditionalBlocks(
5247 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5248 return EmitLValueOrThrowExpression(CGF, E);
5249 });
5250
5251 if ((Info.LHS && !Info.LHS->isSimple()) ||
5252 (Info.RHS && !Info.RHS->isSimple()))
5253 return EmitUnsupportedLValue(expr, "conditional operator");
5254
5255 if (Info.LHS && Info.RHS) {
5256 Address lhsAddr = Info.LHS->getAddress();
5257 Address rhsAddr = Info.RHS->getAddress();
5259 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5260 Builder.GetInsertBlock(), expr->getType());
5261 AlignmentSource alignSource =
5262 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5263 Info.RHS->getBaseInfo().getAlignmentSource());
5265 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5266 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5267 TBAAInfo);
5268 } else {
5269 assert((Info.LHS || Info.RHS) &&
5270 "both operands of glvalue conditional are throw-expressions?");
5271 return Info.LHS ? *Info.LHS : *Info.RHS;
5272 }
5273}
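As a sketch (not from the file), the glvalue conditional handling above corresponds to source like this; the throw-operand case is what EmitLValueOrThrowExpression deals with:

  // Hypothetical user code.
  int pick(bool cond, int &a, int &b) {
    (cond ? a : b) = 7;                      // glvalue ?: with two lvalue arms
    int &r = cond ? a : throw "no value";    // one arm is a throw-expression,
    return r;                                // so only the other arm yields an LValue
  }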
5274
5275/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5276/// type. If the cast is to a reference, we can have the usual lvalue result,
5277/// otherwise if a cast is needed by the code generator in an lvalue context,
5278/// then it must mean that we need the address of an aggregate in order to
5279/// access one of its members. This can happen for all the reasons that casts
5280/// are permitted with aggregate result, including noop aggregate casts, and
5281/// cast from scalar to union.
5283 switch (E->getCastKind()) {
5284 case CK_ToVoid:
5285 case CK_BitCast:
5286 case CK_LValueToRValueBitCast:
5287 case CK_ArrayToPointerDecay:
5288 case CK_FunctionToPointerDecay:
5289 case CK_NullToMemberPointer:
5290 case CK_NullToPointer:
5291 case CK_IntegralToPointer:
5292 case CK_PointerToIntegral:
5293 case CK_PointerToBoolean:
5294 case CK_IntegralCast:
5295 case CK_BooleanToSignedIntegral:
5296 case CK_IntegralToBoolean:
5297 case CK_IntegralToFloating:
5298 case CK_FloatingToIntegral:
5299 case CK_FloatingToBoolean:
5300 case CK_FloatingCast:
5301 case CK_FloatingRealToComplex:
5302 case CK_FloatingComplexToReal:
5303 case CK_FloatingComplexToBoolean:
5304 case CK_FloatingComplexCast:
5305 case CK_FloatingComplexToIntegralComplex:
5306 case CK_IntegralRealToComplex:
5307 case CK_IntegralComplexToReal:
5308 case CK_IntegralComplexToBoolean:
5309 case CK_IntegralComplexCast:
5310 case CK_IntegralComplexToFloatingComplex:
5311 case CK_DerivedToBaseMemberPointer:
5312 case CK_BaseToDerivedMemberPointer:
5313 case CK_MemberPointerToBoolean:
5314 case CK_ReinterpretMemberPointer:
5315 case CK_AnyPointerToBlockPointerCast:
5316 case CK_ARCProduceObject:
5317 case CK_ARCConsumeObject:
5318 case CK_ARCReclaimReturnedObject:
5319 case CK_ARCExtendBlockObject:
5320 case CK_CopyAndAutoreleaseBlockObject:
5321 case CK_IntToOCLSampler:
5322 case CK_FloatingToFixedPoint:
5323 case CK_FixedPointToFloating:
5324 case CK_FixedPointCast:
5325 case CK_FixedPointToBoolean:
5326 case CK_FixedPointToIntegral:
5327 case CK_IntegralToFixedPoint:
5328 case CK_MatrixCast:
5329 case CK_HLSLVectorTruncation:
5330 case CK_HLSLArrayRValue:
5331 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5332
5333 case CK_Dependent:
5334 llvm_unreachable("dependent cast kind in IR gen!");
5335
5336 case CK_BuiltinFnToFnPtr:
5337 llvm_unreachable("builtin functions are handled elsewhere");
5338
5339 // These are never l-values; just use the aggregate emission code.
5340 case CK_NonAtomicToAtomic:
5341 case CK_AtomicToNonAtomic:
5342 return EmitAggExprToLValue(E);
5343
5344 case CK_Dynamic: {
5345 LValue LV = EmitLValue(E->getSubExpr());
5346 Address V = LV.getAddress();
5347 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5349 }
5350
5351 case CK_ConstructorConversion:
5352 case CK_UserDefinedConversion:
5353 case CK_CPointerToObjCPointerCast:
5354 case CK_BlockPointerToObjCPointerCast:
5355 case CK_LValueToRValue:
5356 return EmitLValue(E->getSubExpr());
5357
5358 case CK_NoOp: {
5359 // CK_NoOp can model a qualification conversion, which can remove an array
5360 // bound and change the IR type.
5361 // FIXME: Once pointee types are removed from IR, remove this.
5362 LValue LV = EmitLValue(E->getSubExpr());
 5363 // Propagate the volatile qualifier to the LValue, if it exists in E.
5364 if (E->changesVolatileQualification())
5365 LV.getQuals() = E->getType().getQualifiers();
5366 if (LV.isSimple()) {
5367 Address V = LV.getAddress();
5368 if (V.isValid()) {
5369 llvm::Type *T = ConvertTypeForMem(E->getType());
5370 if (V.getElementType() != T)
5371 LV.setAddress(V.withElementType(T));
5372 }
5373 }
5374 return LV;
5375 }
5376
5377 case CK_UncheckedDerivedToBase:
5378 case CK_DerivedToBase: {
5379 const auto *DerivedClassTy =
5380 E->getSubExpr()->getType()->castAs<RecordType>();
5381 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5382
5383 LValue LV = EmitLValue(E->getSubExpr());
5384 Address This = LV.getAddress();
5385
5386 // Perform the derived-to-base conversion
5388 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5389 /*NullCheckValue=*/false, E->getExprLoc());
5390
5391 // TODO: Support accesses to members of base classes in TBAA. For now, we
5392 // conservatively pretend that the complete object is of the base class
5393 // type.
5394 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5396 }
5397 case CK_ToUnion:
5398 return EmitAggExprToLValue(E);
5399 case CK_BaseToDerived: {
5400 const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5401 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5402
5403 LValue LV = EmitLValue(E->getSubExpr());
5404
5405 // Perform the base-to-derived conversion
5407 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5408 /*NullCheckValue=*/false);
5409
5410 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5411 // performed and the object is not of the derived type.
5414 E->getType());
5415
5416 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5418 /*MayBeNull=*/false, CFITCK_DerivedCast,
5419 E->getBeginLoc());
5420
5421 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5423 }
5424 case CK_LValueBitCast: {
5425 // This must be a reinterpret_cast (or c-style equivalent).
5426 const auto *CE = cast<ExplicitCastExpr>(E);
5427
5428 CGM.EmitExplicitCastExprType(CE, this);
5429 LValue LV = EmitLValue(E->getSubExpr());
5431 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5432
5433 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5435 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5436 E->getBeginLoc());
5437
5438 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5440 }
5441 case CK_AddressSpaceConversion: {
5442 LValue LV = EmitLValue(E->getSubExpr());
5444 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5445 *this, LV.getPointer(*this),
5446 E->getSubExpr()->getType().getAddressSpace(),
5447 E->getType().getAddressSpace(), ConvertType(DestTy));
5449 LV.getAddress().getAlignment()),
5450 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5451 }
5452 case CK_ObjCObjectLValueCast: {
5453 LValue LV = EmitLValue(E->getSubExpr());
5455 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5457 }
5458 case CK_ZeroToOCLOpaqueType:
5459 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5460
5461 case CK_VectorSplat: {
5462 // LValue results of vector splats are only supported in HLSL.
5463 if (!getLangOpts().HLSL)
5464 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5465 return EmitLValue(E->getSubExpr());
5466 }
5467 }
5468
5469 llvm_unreachable("Unhandled lvalue cast kind?");
5470}
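Illustrative only (hypothetical types, not part of the source): the derived-to-base and base-to-derived branches above arise from member accesses and casts such as:

  struct Base { int b; };
  struct Derived : Base { int d; };

  int get(Derived &dr, Base &br) {
    int &x = dr.b;                          // implicit CK_UncheckedDerivedToBase on the base object
    int &y = static_cast<Derived &>(br).d;  // CK_BaseToDerived lvalue cast
    return x + y;
  }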
5471
5475}
5476
5477std::pair<LValue, LValue>
5479 // Emitting the casted temporary through an opaque value.
5480 LValue BaseLV = EmitLValue(E->getArgLValue());
5481 OpaqueValueMappingData::bind(*this, E->getOpaqueArgLValue(), BaseLV);
5482
5483 QualType ExprTy = E->getType();
5484 Address OutTemp = CreateIRTemp(ExprTy);
5485 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
5486
5487 if (E->isInOut())
5488 EmitInitializationToLValue(E->getCastedTemporary()->getSourceExpr(),
5489 TempLV);
5490
5491 OpaqueValueMappingData::bind(*this, E->getCastedTemporary(), TempLV);
5492 return std::make_pair(BaseLV, TempLV);
5493}
5494
5496 CallArgList &Args, QualType Ty) {
5497
5498 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
5499
5500 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
5501 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
5502
5503 llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
5504
5505 llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
5506
5507 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
5508 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
5509 LifetimeSize);
5510 Args.add(RValue::get(TmpAddr, *this), Ty);
5511 return TempLV;
5512}
5513
5514LValue
5517
5518 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5519 it = OpaqueLValues.find(e);
5520
5521 if (it != OpaqueLValues.end())
5522 return it->second;
5523
5524 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5525 return EmitLValue(e->getSourceExpr());
5526}
5527
5528RValue
5531
5532 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5533 it = OpaqueRValues.find(e);
5534
5535 if (it != OpaqueRValues.end())
5536 return it->second;
5537
5538 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5539 return EmitAnyExpr(e->getSourceExpr());
5540}
5541
5543 const FieldDecl *FD,
5545 QualType FT = FD->getType();
5546 LValue FieldLV = EmitLValueForField(LV, FD);
5547 switch (getEvaluationKind(FT)) {
5548 case TEK_Complex:
5549 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5550 case TEK_Aggregate:
5551 return FieldLV.asAggregateRValue();
5552 case TEK_Scalar:
5553 // This routine is used to load fields one-by-one to perform a copy, so
5554 // don't load reference fields.
5555 if (FD->getType()->isReferenceType())
5556 return RValue::get(FieldLV.getPointer(*this));
5557 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5558 // primitive load.
5559 if (FieldLV.isBitField())
5560 return EmitLoadOfLValue(FieldLV, Loc);
5561 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5562 }
5563 llvm_unreachable("bad evaluation kind");
5564}
5565
5566//===--------------------------------------------------------------------===//
5567// Expression Emission
5568//===--------------------------------------------------------------------===//
5569
5571 ReturnValueSlot ReturnValue,
5572 llvm::CallBase **CallOrInvoke) {
5573 llvm::CallBase *CallOrInvokeStorage;
5574 if (!CallOrInvoke) {
5575 CallOrInvoke = &CallOrInvokeStorage;
5576 }
5577
5578 auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
5579 if (E->isCoroElideSafe()) {
5580 auto *I = *CallOrInvoke;
5581 if (I)
5582 I->addFnAttr(llvm::Attribute::CoroElideSafe);
5583 }
5584 });
5585
5586 // Builtins never have block type.
5587 if (E->getCallee()->getType()->isBlockPointerType())
5588 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
5589
5590 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5591 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
5592
5593 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5594 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
5595
5596 // A CXXOperatorCallExpr is created even for explicit object methods, but
 5597 // these should be treated like static function calls.
5598 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5599 if (const auto *MD =
5600 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5601 MD && MD->isImplicitObjectMemberFunction())
5602 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
5603
5604 CGCallee callee = EmitCallee(E->getCallee());
5605
5606 if (callee.isBuiltin()) {
5607 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5608 E, ReturnValue);
5609 }
5610
5611 if (callee.isPseudoDestructor()) {
5613 }
5614
5615 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
5616 /*Chain=*/nullptr, CallOrInvoke);
5617}
5618
5619/// Emit a CallExpr without considering whether it might be a subclass.
5621 ReturnValueSlot ReturnValue,
5622 llvm::CallBase **CallOrInvoke) {
5623 CGCallee Callee = EmitCallee(E->getCallee());
5624 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
5625 /*Chain=*/nullptr, CallOrInvoke);
5626}
5627
5628// Detect the unusual situation where an inline version is shadowed by a
5629// non-inline version. In that case we should pick the external one
5630// everywhere. That's GCC behavior too.
5632 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5633 if (!PD->isInlineBuiltinDeclaration())
5634 return false;
5635 return true;
5636}
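For context (an illustrative C snippet, not from the source), the shadowed-inline-builtin situation detected above comes from the GNU pattern of wrapping a library builtin in an always_inline, gnu_inline definition, as fortified headers do; a hypothetical wrapper looks like:

  extern inline __attribute__((always_inline, gnu_inline))
  void *memcpy(void *dst, const void *src, unsigned long n) {
    return __builtin___memcpy_chk(dst, src, n, __builtin_object_size(dst, 0));
  }

If every visible declaration is such an inline builtin declaration, calls are redirected to a ".inline" clone rather than to the builtin itself (see EmitDirectCallee below).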
5637
5639 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5640
5641 if (auto builtinID = FD->getBuiltinID()) {
5642 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5643 std::string NoBuiltins = "no-builtins";
5644
5645 StringRef Ident = CGF.CGM.getMangledName(GD);
5646 std::string FDInlineName = (Ident + ".inline").str();
5647
5648 bool IsPredefinedLibFunction =
5650 bool HasAttributeNoBuiltin =
5651 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5652 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5653
 5654 // When directly calling an inline builtin, call it through its mangled
5655 // name to make it clear it's not the actual builtin.
5656 if (CGF.CurFn->getName() != FDInlineName &&
5658 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5659 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5660 llvm::Module *M = Fn->getParent();
5661 llvm::Function *Clone = M->getFunction(FDInlineName);
5662 if (!Clone) {
5663 Clone = llvm::Function::Create(Fn->getFunctionType(),
5664 llvm::GlobalValue::InternalLinkage,
5665 Fn->getAddressSpace(), FDInlineName, M);
5666 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5667 }
5668 return CGCallee::forDirect(Clone, GD);
5669 }
5670
5671 // Replaceable builtins provide their own implementation of a builtin. If we
5672 // are in an inline builtin implementation, avoid trivial infinite
5673 // recursion. Honor __attribute__((no_builtin("foo"))) or
 5674 // __attribute__((no_builtin)) on the current function, unless foo is
 5675 // not a predefined library function, in which case we must generate the
 5676 // builtin no matter what.
5677 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5678 return CGCallee::forBuiltin(builtinID, FD);
5679 }
5680
5681 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5682 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5683 FD->hasAttr<CUDAGlobalAttr>())
5684 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5685 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5686
5687 return CGCallee::forDirect(CalleePtr, GD);
5688}
5689
5691 E = E->IgnoreParens();
5692
5693 // Look through function-to-pointer decay.
5694 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5695 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5696 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5697 return EmitCallee(ICE->getSubExpr());
5698 }
5699
5700 // Resolve direct calls.
5701 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5702 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5703 return EmitDirectCallee(*this, FD);
5704 }
5705 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5706 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5707 EmitIgnoredExpr(ME->getBase());
5708 return EmitDirectCallee(*this, FD);
5709 }
5710
5711 // Look through template substitutions.
5712 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5713 return EmitCallee(NTTP->getReplacement());
5714
5715 // Treat pseudo-destructor calls differently.
5716 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5718 }
5719
5720 // Otherwise, we have an indirect reference.
5721 llvm::Value *calleePtr;
5723 if (auto ptrType = E->getType()->getAs<PointerType>()) {
5724 calleePtr = EmitScalarExpr(E);
5725 functionType = ptrType->getPointeeType();
5726 } else {
5727 functionType = E->getType();
5728 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5729 }
5730 assert(functionType->isFunctionType());
5731
5732 GlobalDecl GD;
5733 if (const auto *VD =
5734 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5735 GD = GlobalDecl(VD);
5736
5737 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5739 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
5740 return callee;
5741}
5742
5744 // Comma expressions just emit their LHS then their RHS as an l-value.
5745 if (E->getOpcode() == BO_Comma) {
5746 EmitIgnoredExpr(E->getLHS());
5748 return EmitLValue(E->getRHS());
5749 }
5750
5751 if (E->getOpcode() == BO_PtrMemD ||
5752 E->getOpcode() == BO_PtrMemI)
5754
5755 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5756
5757 // Note that in all of these cases, __block variables need the RHS
5758 // evaluated first just in case the variable gets moved by the RHS.
5759
5760 switch (getEvaluationKind(E->getType())) {
5761 case TEK_Scalar: {
5762 switch (E->getLHS()->getType().getObjCLifetime()) {
5764 return EmitARCStoreStrong(E, /*ignored*/ false).first;
5765
5767 return EmitARCStoreAutoreleasing(E).first;
5768
5769 // No reason to do any of these differently.
5773 break;
5774 }
5775
5776 // TODO: Can we de-duplicate this code with the corresponding code in
5777 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5778 RValue RV;
5779 llvm::Value *Previous = nullptr;
5780 QualType SrcType = E->getRHS()->getType();
 5781 // Check if the LHS is a bitfield; if the RHS contains an implicit cast expression,
5782 // we want to extract that value and potentially (if the bitfield sanitizer
5783 // is enabled) use it to check for an implicit conversion.
5784 if (E->getLHS()->refersToBitField()) {
5785 llvm::Value *RHS =
5787 RV = RValue::get(RHS);
5788 } else
5789 RV = EmitAnyExpr(E->getRHS());
5790
5791 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
5792
5793 if (RV.isScalar())
5795
5796 if (LV.isBitField()) {
5797 llvm::Value *Result = nullptr;
5798 // If bitfield sanitizers are enabled we want to use the result
5799 // to check whether a truncation or sign change has occurred.
5800 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5802 else
5804
5805 // If the expression contained an implicit conversion, make sure
5806 // to use the value before the scalar conversion.
5807 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5808 QualType DstType = E->getLHS()->getType();
5809 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5810 LV.getBitFieldInfo(), E->getExprLoc());
5811 } else
5812 EmitStoreThroughLValue(RV, LV);
5813
5814 if (getLangOpts().OpenMP)
5816 E->getLHS());
5817 return LV;
5818 }
5819
5820 case TEK_Complex:
5822
5823 case TEK_Aggregate:
 5824 // If the lang opt is HLSL and the LHS is a constant array,
 5825 // then we are performing a copy assignment and we call a special
 5826 // function because EmitAggExprToLValue emits to a temporary LValue.
5827 if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
5829
5830 return EmitAggExprToLValue(E);
5831 }
5832 llvm_unreachable("bad evaluation kind");
5833}
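A short sketch (not part of the file) of binary expressions that are themselves used as l-values, which is what EmitBinaryOperatorLValue handles:

  // Hypothetical user code (C++: assignment and comma yield lvalues).
  int demo(int a, int b, int c) {
    (a = b) = c;       // BO_Assign used as an lvalue
    (a += 1, b) = 9;   // BO_Comma: LHS emitted for side effects, RHS is the lvalue
    return a + b;
  }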
5834
5835// This function implements trivial copy assignment for HLSL's
5836// assignable constant arrays.
5838 // Don't emit an LValue for the RHS because it might not be an LValue
5839 LValue LHS = EmitLValue(E->getLHS());
5840 // In C the RHS of an assignment operator is an RValue.
 5841 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
5842 // EmitInitializationToLValue to emit an RValue into an LValue.
5843 EmitInitializationToLValue(E->getRHS(), LHS);
5844 return LHS;
5845}
5846
5848 llvm::CallBase **CallOrInvoke) {
5849 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
5850
5851 if (!RV.isScalar())
5854
5855 assert(E->getCallReturnType(getContext())->isReferenceType() &&
5856 "Can't have a scalar return unless the return type is a "
5857 "reference type!");
5858
5860}
5861
5863 // FIXME: This shouldn't require another copy.
5864 return EmitAggExprToLValue(E);
5865}
5866
5869 && "binding l-value to type which needs a temporary");
5871 EmitCXXConstructExpr(E, Slot);
5873}
5874
5875LValue
5878}
5879
5881 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5883}
5884
5888}
5889
5890LValue
5892 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5894 EmitAggExpr(E->getSubExpr(), Slot);
5895 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5897}
5898
5901
5902 if (!RV.isScalar())
5905
5906 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5907 "Can't have a scalar return unless the return type is a "
5908 "reference type!");
5909
5911}
5912
5914 Address V =
5915 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5917}
5918
5920 const ObjCIvarDecl *Ivar) {
5921 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5922}
5923
5924llvm::Value *
5926 const ObjCIvarDecl *Ivar) {
5927 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5928 QualType PointerDiffType = getContext().getPointerDiffType();
5929 return Builder.CreateZExtOrTrunc(OffsetValue,
5930 getTypes().ConvertType(PointerDiffType));
5931}
5932
5934 llvm::Value *BaseValue,
5935 const ObjCIvarDecl *Ivar,
5936 unsigned CVRQualifiers) {
5937 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5938 Ivar, CVRQualifiers);
5939}
5940
5942 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5943 llvm::Value *BaseValue = nullptr;
5944 const Expr *BaseExpr = E->getBase();
5945 Qualifiers BaseQuals;
5946 QualType ObjectTy;
5947 if (E->isArrow()) {
5948 BaseValue = EmitScalarExpr(BaseExpr);
5949 ObjectTy = BaseExpr->getType()->getPointeeType();
5950 BaseQuals = ObjectTy.getQualifiers();
5951 } else {
5952 LValue BaseLV = EmitLValue(BaseExpr);
5953 BaseValue = BaseLV.getPointer(*this);
5954 ObjectTy = BaseExpr->getType();
5955 BaseQuals = ObjectTy.getQualifiers();
5956 }
5957
5958 LValue LV =
5959 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5960 BaseQuals.getCVRQualifiers());
5962 return LV;
5963}
5964
5966 // Can only get l-value for message expression returning aggregate type
5970}
5971
5973 const CGCallee &OrigCallee, const CallExpr *E,
5974 ReturnValueSlot ReturnValue,
5975 llvm::Value *Chain,
5976 llvm::CallBase **CallOrInvoke,
5977 CGFunctionInfo const **ResolvedFnInfo) {
5978 // Get the actual function type. The callee type will always be a pointer to
5979 // function type or a block pointer type.
5980 assert(CalleeType->isFunctionPointerType() &&
5981 "Call must have function pointer type!");
5982
5983 const Decl *TargetDecl =
5984 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5985
5986 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5987 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5988 "trying to emit a call to an immediate function");
5989
5990 CalleeType = getContext().getCanonicalType(CalleeType);
5991
5992 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5993
5994 CGCallee Callee = OrigCallee;
5995
5996 if (SanOpts.has(SanitizerKind::Function) &&
5997 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
5998 !isa<FunctionNoProtoType>(PointeeType)) {
5999 if (llvm::Constant *PrefixSig =
6001 SanitizerScope SanScope(this);
6002 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6003
6004 llvm::Type *PrefixSigType = PrefixSig->getType();
6005 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6006 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6007
6008 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6010 // Use raw pointer since we are using the callee pointer as data here.
6011 Address Addr =
6012 Address(CalleePtr, CalleePtr->getType(),
6014 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6015 Callee.getPointerAuthInfo(), nullptr);
6016 CalleePtr = Addr.emitRawPointer(*this);
6017 }
6018
6019 // On 32-bit Arm, the low bit of a function pointer indicates whether
6020 // it's using the Arm or Thumb instruction set. The actual first
6021 // instruction lives at the same address either way, so we must clear
6022 // that low bit before using the function address to find the prefix
6023 // structure.
6024 //
6025 // This applies to both Arm and Thumb target triples, because
6026 // either one could be used in an interworking context where it
6027 // might be passed function pointers of both types.
6028 llvm::Value *AlignedCalleePtr;
6029 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6030 llvm::Value *CalleeAddress =
6031 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
6032 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
6033 llvm::Value *AlignedCalleeAddress =
6034 Builder.CreateAnd(CalleeAddress, Mask);
6035 AlignedCalleePtr =
6036 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
6037 } else {
6038 AlignedCalleePtr = CalleePtr;
6039 }
6040
6041 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6042 llvm::Value *CalleeSigPtr =
6043 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6044 llvm::Value *CalleeSig =
6045 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6046 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6047
6048 llvm::BasicBlock *Cont = createBasicBlock("cont");
6049 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6050 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6051
6052 EmitBlock(TypeCheck);
6053 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6054 Int32Ty,
6055 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6056 getPointerAlign());
6057 llvm::Value *CalleeTypeHashMatch =
6058 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6059 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6060 EmitCheckTypeDescriptor(CalleeType)};
6061 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::SO_Function),
6062 SanitizerHandler::FunctionTypeMismatch, StaticData,
6063 {CalleePtr});
6064
6065 Builder.CreateBr(Cont);
6066 EmitBlock(Cont);
6067 }
6068 }
6069
6070 const auto *FnType = cast<FunctionType>(PointeeType);
6071
6072 // If we are checking indirect calls and this call is indirect, check that the
6073 // function pointer is a member of the bit set for the function type.
6074 if (SanOpts.has(SanitizerKind::CFIICall) &&
6075 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6076 SanitizerScope SanScope(this);
6077 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6078
6079 llvm::Metadata *MD;
6080 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
6082 else
6084
6085 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6086
6087 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6088 llvm::Value *TypeTest = Builder.CreateCall(
6089 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6090
6091 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6092 llvm::Constant *StaticData[] = {
6093 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6096 };
6097 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6098 EmitCfiSlowPathCheck(SanitizerKind::SO_CFIICall, TypeTest, CrossDsoTypeId,
6099 CalleePtr, StaticData);
6100 } else {
6101 EmitCheck(std::make_pair(TypeTest, SanitizerKind::SO_CFIICall),
6102 SanitizerHandler::CFICheckFail, StaticData,
6103 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6104 }
6105 }
6106
6107 CallArgList Args;
6108 if (Chain)
6109 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6110
6111 // C++17 requires that we evaluate arguments to a call using assignment syntax
6112 // right-to-left, and that we evaluate arguments to certain other operators
6113 // left-to-right. Note that we allow this to override the order dictated by
6114 // the calling convention on the MS ABI, which means that parameter
6115 // destruction order is not necessarily reverse construction order.
6116 // FIXME: Revisit this based on C++ committee response to unimplementability.
6118 bool StaticOperator = false;
6119 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
6120 if (OCE->isAssignmentOp())
6122 else {
6123 switch (OCE->getOperator()) {
6124 case OO_LessLess:
6125 case OO_GreaterGreater:
6126 case OO_AmpAmp:
6127 case OO_PipePipe:
6128 case OO_Comma:
6129 case OO_ArrowStar:
6131 break;
6132 default:
6133 break;
6134 }
6135 }
6136
6137 if (const auto *MD =
6138 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6139 MD && MD->isStatic())
6140 StaticOperator = true;
6141 }
6142
6143 auto Arguments = E->arguments();
6144 if (StaticOperator) {
6145 // If we're calling a static operator, we need to emit the object argument
6146 // and ignore it.
6147 EmitIgnoredExpr(E->getArg(0));
6148 Arguments = drop_begin(Arguments, 1);
6149 }
6150 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6151 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6152
6154 Args, FnType, /*ChainCall=*/Chain);
6155
6156 if (ResolvedFnInfo)
6157 *ResolvedFnInfo = &FnInfo;
6158
 6159 // A HIP function pointer contains a kernel handle when it is used in triple
 6160 // chevron notation. The kernel stub needs to be loaded from the kernel handle
 6161 // and used as the callee.
6162 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6163 isa<CUDAKernelCallExpr>(E) &&
6164 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6165 llvm::Value *Handle = Callee.getFunctionPointer();
6166 auto *Stub = Builder.CreateLoad(
6167 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6168 Callee.setFunctionPointer(Stub);
6169 }
6170 llvm::CallBase *LocalCallOrInvoke = nullptr;
6171 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
6172 E == MustTailCall, E->getExprLoc());
6173
 6174 // Generate the function declaration DISubprogram in order to be used
6175 // in debug info about call sites.
6176 if (CGDebugInfo *DI = getDebugInfo()) {
6177 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6178 FunctionArgList Args;
6179 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6180 DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
6181 DI->getFunctionType(CalleeDecl, ResTy, Args),
6182 CalleeDecl);
6183 }
6184 }
6185 if (CallOrInvoke)
6186 *CallOrInvoke = LocalCallOrInvoke;
6187
6188 return Call;
6189}
6190
6193 Address BaseAddr = Address::invalid();
6194 if (E->getOpcode() == BO_PtrMemI) {
6195 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6196 } else {
6197 BaseAddr = EmitLValue(E->getLHS()).getAddress();
6198 }
6199
6200 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6201 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6202
6203 LValueBaseInfo BaseInfo;
6204 TBAAAccessInfo TBAAInfo;
6205 Address MemberAddr =
6206 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6207 &TBAAInfo);
6208
6209 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6210}
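A minimal illustration (not from CGExpr.cpp) of the .* and ->* accesses handled above:

  // Hypothetical user code.
  struct Point { int x; int y; };

  int read(Point &p, Point *q, int Point::*member) {
    return p.*member + q->*member;   // BO_PtrMemD and BO_PtrMemI respectively
  }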
6211
6212/// Given the address of a temporary variable, produce an r-value of
6213/// its type.
6215 QualType type,
6216 SourceLocation loc) {
6218 switch (getEvaluationKind(type)) {
6219 case TEK_Complex:
6220 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6221 case TEK_Aggregate:
6222 return lvalue.asAggregateRValue();
6223 case TEK_Scalar:
6224 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6225 }
6226 llvm_unreachable("bad evaluation kind");
6227}
6228
6229void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6230 assert(Val->getType()->isFPOrFPVectorTy());
6231 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6232 return;
6233
6234 llvm::MDBuilder MDHelper(getLLVMContext());
6235 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6236
6237 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6238}
6239
6240void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6241 llvm::Type *EltTy = Val->getType()->getScalarType();
6242 if (!EltTy->isFloatTy())
6243 return;
6244
6245 if ((getLangOpts().OpenCL &&
6246 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6247 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6248 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
 6249 // OpenCL v1.1 s7.4: minimum accuracy of single-precision sqrt is 3ulp
6250 //
6251 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6252 // build option allows an application to specify that single precision
6253 // floating-point divide (x/y and 1/x) and sqrt used in the program
6254 // source are correctly rounded.
6255 //
6256 // TODO: CUDA has a prec-sqrt flag
6257 SetFPAccuracy(Val, 3.0f);
6258 }
6259}
6260
6261void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6262 llvm::Type *EltTy = Val->getType()->getScalarType();
6263 if (!EltTy->isFloatTy())
6264 return;
6265
6266 if ((getLangOpts().OpenCL &&
6267 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6268 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6269 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6270 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6271 //
6272 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6273 // build option allows an application to specify that single precision
6274 // floating-point divide (x/y and 1/x) and sqrt used in the program
6275 // source are correctly rounded.
6276 //
6277 // TODO: CUDA has a prec-div flag
6278 SetFPAccuracy(Val, 2.5f);
6279 }
6280}
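For illustration (assuming an OpenCL compile without the -cl-fp32-correctly-rounded-divide-sqrt option), the effect of the two helpers above is !fpmath metadata attached to the float divide or sqrt instruction, along the lines of:

  // Hypothetical OpenCL C kernel.
  __kernel void divide(__global float *out, float x, float y) {
    out[0] = x / y;   // emitted roughly as: %r = fdiv float %x, %y, !fpmath !N
                      // with !N = !{float 2.500000e+00}
  }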
6281
6282namespace {
6283 struct LValueOrRValue {
6284 LValue LV;
6285 RValue RV;
6286 };
6287}
6288
6289static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6290 const PseudoObjectExpr *E,
6291 bool forLValue,
6292 AggValueSlot slot) {
6294
6295 // Find the result expression, if any.
6296 const Expr *resultExpr = E->getResultExpr();
6297 LValueOrRValue result;
6298
6300 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6301 const Expr *semantic = *i;
6302
6303 // If this semantic expression is an opaque value, bind it
6304 // to the result of its source expression.
6305 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6306 // Skip unique OVEs.
6307 if (ov->isUnique()) {
6308 assert(ov != resultExpr &&
6309 "A unique OVE cannot be used as the result expression");
6310 continue;
6311 }
6312
6313 // If this is the result expression, we may need to evaluate
6314 // directly into the slot.
6315 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6316 OVMA opaqueData;
6317 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6319 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6320 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6322 opaqueData = OVMA::bind(CGF, ov, LV);
6323 result.RV = slot.asRValue();
6324
6325 // Otherwise, emit as normal.
6326 } else {
6327 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6328
6329 // If this is the result, also evaluate the result now.
6330 if (ov == resultExpr) {
6331 if (forLValue)
6332 result.LV = CGF.EmitLValue(ov);
6333 else
6334 result.RV = CGF.EmitAnyExpr(ov, slot);
6335 }
6336 }
6337
6338 opaques.push_back(opaqueData);
6339
6340 // Otherwise, if the expression is the result, evaluate it
6341 // and remember the result.
6342 } else if (semantic == resultExpr) {
6343 if (forLValue)
6344 result.LV = CGF.EmitLValue(semantic);
6345 else
6346 result.RV = CGF.EmitAnyExpr(semantic, slot);
6347
6348 // Otherwise, evaluate the expression in an ignored context.
6349 } else {
6350 CGF.EmitIgnoredExpr(semantic);
6351 }
6352 }
6353
6354 // Unbind all the opaques now.
6355 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6356 opaques[i].unbind(CGF);
6357
6358 return result;
6359}
6360
6362 AggValueSlot slot) {
6363 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6364}
6365
6367 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6368}
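Illustrative only: pseudo-object expressions come from constructs such as Objective-C property accesses and MS __declspec(property) members; a hypothetical -fms-extensions example:

  struct Widget {
    int getW() const { return w_; }
    void setW(int v) { w_ = v; }
    __declspec(property(get = getW, put = setW)) int width;
    int w_ = 0;
  };

  int touch(Widget &wg) {
    wg.width = 3;      // rewritten as wg.setW(3) through a PseudoObjectExpr
    return wg.width;   // rewritten as wg.getW()
  }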
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3453
DynTypedNode Node
Defines enum values for all the target-independent builtin functions.
CodeGenFunction::ComplexPairTy ComplexPairTy
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition: CGExpr.cpp:2703
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition: CGExpr.cpp:2946
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition: CGExpr.cpp:690
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition: CGExpr.cpp:4002
static bool hasBooleanRepresentation(QualType Ty)
Definition: CGExpr.cpp:1881
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition: CGExpr.cpp:4186
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition: CGExpr.cpp:4072
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field)
Drill down to the storage of a field without walking into reference types.
Definition: CGExpr.cpp:4851
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type? This is different from pr...
Definition: CGExpr.cpp:1735
@ CEK_AsReferenceOnly
Definition: CGExpr.cpp:1737
@ CEK_AsValueOnly
Definition: CGExpr.cpp:1739
@ CEK_None
Definition: CGExpr.cpp:1736
@ CEK_AsValueOrReference
Definition: CGExpr.cpp:1738
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition: CGExpr.cpp:1710
static QualType getFixedSizeElementType(const ASTContext &ctx, const VariableArrayType *vla)
Definition: CGExpr.cpp:4063
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition: CGExpr.cpp:2934
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition: CGExpr.cpp:5129
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition: CGExpr.cpp:3521
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition: CGExpr.cpp:4016
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition: CGExpr.cpp:2925
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition: CGExpr.cpp:2104
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition: CGExpr.cpp:6289
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition: CGExpr.cpp:4088
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, const MemberExpr *ME)
Definition: CGExpr.cpp:1845
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition: CGExpr.cpp:962
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2201
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition: CGExpr.cpp:1741
static QualType getConstantExprReferredType(const FullExpr *E, const ASTContext &Ctx)
Definition: CGExpr.cpp:1542
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition: CGExpr.cpp:1894
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition: CGExpr.cpp:4217
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition: CGExpr.cpp:5638
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition: CGExpr.cpp:2800
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition: CGExpr.cpp:1112
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition: CGExpr.cpp:5631
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition: CGExpr.cpp:2874
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition: CGExpr.cpp:4877
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition: CGExpr.cpp:4101
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition: CGExpr.cpp:2814
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD)
Determine whether we can emit a reference to VD from the current context, despite not necessarily hav...
Definition: CGExpr.cpp:2971
VariableTypeDescriptorKind
Definition: CGExpr.cpp:68
@ TK_Float
A floating-point type.
Definition: CGExpr.cpp:72
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition: CGExpr.cpp:76
@ TK_Integer
An integer type.
Definition: CGExpr.cpp:70
@ TK_BitInt
An _BitInt(N) type.
Definition: CGExpr.cpp:74
static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize)
Definition: CGExpr.cpp:4048
static RawAddress createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner, RawAddress *Alloca=nullptr)
Definition: CGExpr.cpp:436
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Definition: CGExpr.cpp:484
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2129
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1281
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition: CGExpr.cpp:4864
const SanitizerHandlerInfo SanitizerHandlers[]
Definition: CGExpr.cpp:3538
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field)
Get the address of a zero-sized field within a record.
Definition: CGExpr.cpp:4837
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition: CGExpr.cpp:3544
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition: CGExpr.cpp:4464
static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, Address ReferenceTemporary)
Definition: CGExpr.cpp:320
const Decl * D
Expr * E
StringRef Filename
Definition: Format.cpp:3051
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:759
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
StateNode * Previous
const LValueBase getLValueBase() const
Definition: APValue.cpp:973
bool isLValue() const
Definition: APValue.h:448
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
SourceManager & getSourceManager()
Definition: ASTContext.h:741
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2716
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1187
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:682
const LangOptions & getLangOpts() const
Definition: ASTContext.h:834
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
Definition: ASTContext.cpp:854
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
Definition: ASTContext.h:1161
const NoSanitizeList & getNoSanitizeList() const
Definition: ASTContext.h:844
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
Definition: ASTContext.h:1256
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2482
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
Definition: ASTContext.h:1160
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2918
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2486
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4224
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6986
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition: Expr.cpp:5184
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2718
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
QualType getElementType() const
Definition: Type.h:3589
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3909
A fixed int type of a specified bitwidth.
Definition: Type.h:7819
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition: Builtins.h:161
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2817
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1378
bool isDynamicClass() const
Definition: DeclCXX.h:586
bool hasDefinition() const
Definition: DeclCXX.h:572
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3547
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt. Beware: llvm::MaybeAlign as...
Definition: CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
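A hedged sketch of the CharUnits arithmetic listed above, with made-up numbers (an array whose start is 16-byte aligned and whose elements are 12 bytes each); it is not code from this file.

#include "clang/AST/CharUnits.h"

void charUnitsExample() {
  clang::CharUnits ArrayAlign = clang::CharUnits::fromQuantity(16);
  clang::CharUnits EltSize    = clang::CharUnits::fromQuantity(12);
  // Minimum alignment guaranteed for any element of such an array: 4 bytes,
  // since element offsets are multiples of 12 within a 16-byte-aligned block.
  clang::CharUnits EltAlign = ArrayAlign.alignmentOfArrayElement(EltSize);
  (void)EltAlign.getQuantity(); // raw byte count (4)
  (void)EltAlign.getAsAlign();  // llvm::Align for use when emitting IR
}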
SanitizerSet SanitizeMergeHandlers
Set of sanitizer checks that can merge handlers (smaller code size at the expense of debuggability).
PointerAuthOptions PointerAuth
Configuration for pointer-signing.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeRecover
Set of sanitizer checks that are non-fatal (i.e.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:259
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:231
Address setKnownNonNull()
Definition: Address.h:236
void setAlignment(CharUnits Value)
Definition: Address.h:191
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
Address getAddress() const
Definition: CGValue.h:644
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:613
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
RValue asRValue() const
Definition: CGValue.h:666
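A hedged sketch of wrapping an already-emitted LValue as an aggregate destination slot and evaluating an aggregate-typed expression into it. The flag choices are illustrative, and the enumerator spellings follow clang's CGValue.h; this assumes a live CodeGenFunction inside lib/CodeGen.

#include "CGValue.h"          // AggValueSlot, LValue (internal lib/CodeGen header)
#include "CodeGenFunction.h"  // CodeGenFunction (internal lib/CodeGen header)

using namespace clang::CodeGen;

// Illustrative: E is assumed to have aggregate evaluation kind.
static void emitAggregateInto(CodeGenFunction &CGF, const clang::Expr *E,
                              LValue Dest) {
  AggValueSlot Slot = AggValueSlot::forLValue(
      Dest, AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
      AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
  CGF.EmitAggExpr(E, Slot);
}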
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:858
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:203
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:331
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:413
Address CreateLaunderInvariantGroup(Address Addr)
Definition: CGBuilder.h:437
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:429
Address CreateStripInvariantGroup(Address Addr)
Definition: CGBuilder.h:443
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:189
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:261
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:346
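A hedged sketch of how the GEP and load helpers above are typically combined inside lib/CodeGen; the field index and value names are made up for illustration.

#include "CGBuilder.h"        // CGBuilderTy (internal lib/CodeGen header)
#include "CodeGenFunction.h"  // CodeGenFunction (internal lib/CodeGen header)

using namespace clang::CodeGen;

// Illustrative: address struct element #0 and load the scalar stored there.
static llvm::Value *loadFirstField(CodeGenFunction &CGF, Address StructAddr) {
  // CreateStructGEP tracks the element type and the alignment implied by the
  // enclosing struct's layout.
  Address FieldAddr = CGF.Builder.CreateStructGEP(StructAddr, 0, "field0.addr");
  return CGF.Builder.CreateLoad(FieldAddr, "field0");
}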
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
Abstract information about a function or function prototype.
Definition: CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:59
All available information about a concrete callee.
Definition: CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition: CGCall.h:172
bool isPseudoDestructor() const
Definition: CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition: CGCall.h:123
unsigned getBuiltinID() const
Definition: CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
bool isBuiltin() const
Definition: CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition: CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition: CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, llvm::Value *ivarOffset)=0
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)=0
virtual llvm::Value * EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)=0
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj)=0
virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel)=0
Get the address of a selector for the specified name and type values.
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, bool threadlocal=false)=0
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:305
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr, llvm::Value *lifetimeSz=nullptr)
Definition: CGCall.h:326
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check for an [implicit] conversion of a bitfield.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitARCLoadWeakRetained(Address addr)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Address EmitExtVectorElementLValue(LValue V)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
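A hedged sketch of how one of these TypeCheckKind values is passed to EmitTypeCheck; it assumes a live CodeGenFunction and an already-emitted LValue for the object expression, and is illustrative rather than the code path used in this file.

#include "CodeGenFunction.h"  // internal lib/CodeGen header

using namespace clang;
using namespace clang::CodeGen;

// Illustrative: guard a member access with the sanitizer's type check when
// any type-checking sanitizer is enabled.
static void checkBeforeMemberAccess(CodeGenFunction &CGF, const Expr *ObjExpr,
                                    LValue ObjLV) {
  if (CGF.sanitizePerformTypeCheck())
    CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberAccess, ObjExpr->getExprLoc(),
                      ObjLV, ObjExpr->getType());
}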
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void markStmtMaybeUsed(const Stmt *S)
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ Default
No language constraints on evaluation order.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
LValue EmitCoyieldLValue(const CoyieldExpr *E)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
const TargetCodeGenInfo & getTargetHooks() const
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow-path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
LValue EmitMemberExpr(const MemberExpr *E)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
LValue EmitStringLiteralLValue(const StringLiteral *E)
static Destroyer destroyARCStrongPrecise
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
static Destroyer destroyARCStrongImprecise
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
This class organizes the cross-function state that is used while generating LLVM code.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD)
Get the address of a GUID.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if the type of an expression is a variably modified type.
Definition: CGExpr.cpp:1267
void setDSOLocal(llvm::GlobalValue *GV) const
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CGDebugInfo * getModuleDebugInfo()
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E)
Returns a pointer to a constant global variable for the given file-scope compound literal expression.
llvm::ConstantInt * CreateCrossDsoCfiTypeId(llvm::Metadata *MD)
Generate a cross-DSO type identifier for MD.
void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition: CGExpr.cpp:2913
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition: CGCXX.cpp:218
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1107
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
DiagnosticsEngine & getDiags() const
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref< void()> Fn)
Run some code with "sufficient" stack space.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
llvm::Metadata * CreateMetadataIdentifierForType(QualType T)
Create a metadata identifier for the given type.
llvm::Constant * getTypeDescriptorFromMap(QualType Ty)
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
llvm::MDNode * getTBAABaseTypeInfo(QualType QTy)
getTBAABaseTypeInfo - Get metadata that describes the given base access type.
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGPointerAuthInfo getFunctionPointerAuthInfo(QualType T)
Return the abstract pointer authentication schema for a pointer to the given function type.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Metadata * CreateMetadataIdentifierGeneralized(QualType T)
Create a metadata identifier for the generalization of the given type.
const llvm::Triple & getTriple() const
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:245
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ConstantAddress GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name=".str")
Return a pointer to a constant array for the given string literal.
ASTContext & getContext() const
ConstantAddress GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO)
Get the address of a template parameter object.
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
llvm::MDNode * getTBAATypeInfo(QualType QTy)
getTBAATypeInfo - Get metadata used to describe accesses to objects of the given type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, TBAAAccessInfo InfoB)
mergeTBAAInfoForConditionalOperator - Get merged TBAA information for the purposes of conditional ope...
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info, llvm::Function *F, bool IsThunk)
Set the LLVM function attributes (sext, zext, etc).
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F)
Set the LLVM function attributes which only apply to a function definition.
ConstantAddress GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *)
Return a pointer to a constant array for the given ObjCEncodeExpr node.
ConstantAddress GetAddrOfConstantCString(const std::string &Str, const char *GlobalName=nullptr)
Returns a pointer to a character array containing the literal and a terminating '\0' character.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:679
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenTypes.h:99
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:638
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
ConstantAddress withElementType(llvm::Type *ElemTy) const
Definition: Address.h:310
llvm::Constant * getPointer() const
Definition: Address.h:306
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
void mergeForCast(const LValueBaseInfo &Info)
Definition: CGValue.h:174
AlignmentSource getAlignmentSource() const
Definition: CGValue.h:171
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isMatrixElt() const
Definition: CGValue.h:283
Expr * getBaseIvarExp() const
Definition: CGValue.h:332
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:409
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition: CGValue.h:478
void setObjCIvar(bool Value)
Definition: CGValue.h:298
bool isObjCArray() const
Definition: CGValue.h:300
bool isObjCStrong() const
Definition: CGValue.h:324
bool isGlobalObjCRef() const
Definition: CGValue.h:306
bool isVectorElt() const
Definition: CGValue.h:279
void setObjCArray(bool Value)
Definition: CGValue.h:301
bool isSimple() const
Definition: CGValue.h:278
bool isVolatileQualified() const
Definition: CGValue.h:285
RValue asAggregateRValue() const
Definition: CGValue.h:498
CharUnits getAlignment() const
Definition: CGValue.h:343
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition: CGValue.h:395
llvm::Value * getGlobalReg() const
Definition: CGValue.h:430
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
bool isVolatile() const
Definition: CGValue.h:328
const Qualifiers & getQuals() const
Definition: CGValue.h:338
bool isGlobalReg() const
Definition: CGValue.h:282
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:452
bool isObjCWeak() const
Definition: CGValue.h:321
Address getAddress() const
Definition: CGValue.h:361
unsigned getVRQualifiers() const
Definition: CGValue.h:287
void setThreadLocalRef(bool Value)
Definition: CGValue.h:310
LValue setKnownNonNull()
Definition: CGValue.h:350
bool isNonGC() const
Definition: CGValue.h:303
void setGlobalObjCRef(bool Value)
Definition: CGValue.h:307
bool isExtVectorElt() const
Definition: CGValue.h:281
llvm::Value * getVectorIdx() const
Definition: CGValue.h:382
void setNontemporal(bool Value)
Definition: CGValue.h:319
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:346
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition: CGValue.h:315
QualType getType() const
Definition: CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:424
bool isThreadLocalRef() const
Definition: CGValue.h:309
KnownNonNull_t isKnownNonNull() const
Definition: CGValue.h:349
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:335
void setNonGC(bool Value)
Definition: CGValue.h:304
Address getVectorAddress() const
Definition: CGValue.h:370
bool isNontemporal() const
Definition: CGValue.h:318
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:468
bool isObjCIvar() const
Definition: CGValue.h:297
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:442
void setAddress(Address address)
Definition: CGValue.h:363
void setBaseIvarExp(Expr *V)
Definition: CGValue.h:333
Address getExtVectorAddress() const
Definition: CGValue.h:401
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:488
Address getMatrixAddress() const
Definition: CGValue.h:387
Address getBitFieldAddress() const
Definition: CGValue.h:415
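A minimal sketch of building and using a simple LValue inside CodeGen, assuming CGF is the current CodeGenFunction and T is the QualType of a plain int temporary (all names here are illustrative, not code from this file):

  // Create a stack slot and wrap it as a simple (non-bit-field, non-vector) lvalue.
  Address Slot = CGF.CreateMemTemp(T, "tmp.example");
  LValue LV = LValue::MakeAddr(Slot, T, CGF.getContext(),
                               LValueBaseInfo(AlignmentSource::Decl),
                               TBAAAccessInfo());
  assert(LV.isSimple() && !LV.isBitField());
  CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.Int32Ty, 0), LV); // store 0 through it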
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:108
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
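A brief sketch of the RValue wrappers, assuming Real and Imag are llvm::Value pointers and AggSlot an Address already produced by expression emission (illustrative names):

  RValue Scalar  = RValue::get(Real);               // a single scalar value
  RValue Complex = RValue::getComplex(Real, Imag);  // a real/imaginary pair
  RValue Agg     = RValue::getAggregate(AggSlot);   // aggregate results live in memory
  llvm::Value *Raw = Scalar.isScalar() ? Scalar.getScalarVal() : nullptr;
  Address Where    = Agg.getAggregateAddress();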
An abstract representation of an aligned address.
Definition: Address.h:42
RawAddress withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:77
llvm::Value * getPointer() const
Definition: Address.h:66
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:237
Complex values, per C99 6.2.5p11.
Definition: Type.h:3145
QualType getElementType() const
Definition: Type.h:3155
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3477
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:195
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4232
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4250
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
Definition: DeclBase.cpp:2024
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition: Expr.h:1463
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition: Expr.cpp:487
ValueDecl * getDecl()
Definition: Expr.h:1333
SourceLocation getLocation() const
Definition: Expr.h:1341
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:576
SourceLocation getLocation() const
Definition: DeclBase.h:442
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:549
DeclContext * getDeclContext()
Definition: DeclBase.h:451
bool hasAttr() const
Definition: DeclBase.h:580
void ConvertArgToString(ArgumentKind Kind, intptr_t Val, StringRef Modifier, StringRef Argument, ArrayRef< ArgumentValue > PrevArgs, SmallVectorImpl< char > &Output, ArrayRef< intptr_t > QualTypeVals) const
Converts a diagnostic argument (as an intptr_t) into the string that represents it.
Definition: Diagnostic.h:903
Represents an enum.
Definition: Decl.h:3861
bool isFixed() const
Returns true if this is an Objective-C, C++11, or Microsoft-style enumeration with a fixed underlying...
Definition: Decl.h:4075
void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const
Calculates the [Min,Max) values the enum can store based on the NumPositiveBits and NumNegativeBits.
Definition: Decl.cpp:4998
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:6103
EnumDecl * getDecl() const
Definition: Type.h:6110
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3799
This represents one expression.
Definition: Expr.h:110
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition: Expr.cpp:82
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3124
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition: Expr.h:437
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition: Expr.cpp:3097
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3085
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3093
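The Ignore* helpers peel different layers of syntactic sugar before pattern matching; a short sketch, with E a const Expr* and handleDecl a hypothetical callback:

  const clang::Expr *Core = E->IgnoreParenImpCasts(); // strips parentheses and implicit casts
  if (const auto *DRE = llvm::dyn_cast<clang::DeclRefExpr>(Core))
    handleDecl(DRE->getDecl());
  // IgnoreParens() strips only parentheses; IgnoreImplicit() only implicit AST nodes.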
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition: Expr.h:278
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:277
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
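A sketch of constant-folding through EvaluateAsRValue, assuming Ctx is the ASTContext for E:

  clang::Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects && Result.Val.isInt()) {
    llvm::APSInt Folded = Result.Val.getInt(); // the folded integer value
    (void)Folded;
  }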
Decl * getReferencedDeclOfCallee()
Definition: Expr.cpp:1550
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3594
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3077
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:276
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
bool isFlexibleArrayMemberLike(ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution=false) const
Check whether this array fits the idiom of a flexible array member, depending on the value of -fstric...
Definition: Expr.cpp:205
QualType getType() const
Definition: Expr.h:142
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition: Expr.cpp:3008
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6354
Represents a member of a struct/union/class.
Definition: Decl.h:3033
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3136
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.h:3118
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3264
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition: Decl.cpp:4707
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:1044
const Expr * getSubExpr() const
Definition: Expr.h:1057
Represents a function declaration or definition.
Definition: Decl.h:1935
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3638
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5107
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7152
Describes a C or C++ initializer list.
Definition: Expr.h:5088
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:505
virtual void mangleCXXRTTI(QualType T, raw_ostream &)=0
unsigned getBlockId(const BlockDecl *BD, bool Local)
Definition: Mangle.h:84
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4734
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition: ExprCXX.h:4759
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4751
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition: ExprCXX.h:4784
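How the storage duration of a materialized temporary is typically consumed; a hedged sketch with E a const Expr*:

  if (const auto *MTE = llvm::dyn_cast<clang::MaterializeTemporaryExpr>(E)) {
    switch (MTE->getStorageDuration()) {
    case clang::SD_FullExpression: /* destroyed at the end of the full-expression */ break;
    case clang::SD_Automatic:      /* lifetime extended to the enclosing scope */    break;
    case clang::SD_Static:         /* emitted as a global temporary */               break;
    default: break;
    }
  }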
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2796
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3236
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition: Expr.h:3319
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why? This is only meaningful if the named memb...
Definition: Expr.h:3460
Expr * getBase() const
Definition: Expr.h:3313
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:3431
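A small sketch of walking a member access such as s.f, with E a const Expr*:

  if (const auto *ME = llvm::dyn_cast<clang::MemberExpr>(E)) {
    const clang::Expr *Base = ME->getBase();                   // the object expression
    if (const auto *FD = llvm::dyn_cast<clang::FieldDecl>(ME->getMemberDecl())) {
      unsigned Idx  = FD->getFieldIndex(); // layout index within the parent record
      bool IsBitFld = FD->isBitField();
      (void)Base; (void)Idx; (void)IsBitFld;
    }
  }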
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3519
bool isObjCBOOLType(QualType T) const
Returns true if T is a typedef of "BOOL" in Objective-C.
Definition: NSAPI.cpp:481
This represents a decl that may have a name.
Definition: Decl.h:253
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:280
A C++ nested-name-specifier augmented with source location information.
bool containsType(SanitizerMask Mask, StringRef MangledTypeName, StringRef Category=StringRef()) const
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:941
Represents a class type in Objective C.
Definition: Type.h:7331
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
bool isUnique() const
Definition: Expr.h:1231
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2170
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3198
QualType getPointeeType() const
Definition: Type.h:3208
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition: Expr.h:1991
StringRef getIdentKindName() const
Definition: Expr.h:2048
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
bool isValid() const
unsigned getLine() const
Return the presumed line number of this location.
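A sketch of mapping a SourceLocation to a user-visible position, assuming SM is the SourceManager and Loc a SourceLocation:

  clang::PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  if (PLoc.isValid())
    llvm::errs() << PLoc.getFilename() << ':' << PLoc.getLine()
                 << ':' << PLoc.getColumn() << '\n';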
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6546
const Expr *const * const_semantics_iterator
Definition: Expr.h:6611
A (possibly-)qualified type.
Definition: Type.h:929
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:8020
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:8062
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7976
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1433
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:8030
QualType withCVRQualifiers(unsigned CVR) const
Definition: Type.h:1174
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1531
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1028
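Common QualType queries in one hedged sketch, with E a const Expr*:

  clang::QualType QT = E->getType();
  if (!QT.isNull()) {
    bool Volatile        = QT.isVolatileQualified();
    clang::LangAS AS     = QT.getAddressSpace();
    clang::QualType Bare = QT.getUnqualifiedType();
    bool NeedsCleanup    = QT.isDestructedType() != clang::QualType::DK_none;
    (void)Volatile; (void)AS; (void)Bare; (void)NeedsCleanup;
  }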
The collection of all-type qualifiers we support.
Definition: Type.h:324
unsigned getCVRQualifiers() const
Definition: Type.h:481
GC getObjCGCAttr() const
Definition: Type.h:512
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:354
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:347
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:343
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:357
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:360
bool hasConst() const
Definition: Type.h:450
void addCVRQualifiers(unsigned mask)
Definition: Type.h:495
void removeObjCGCAttr()
Definition: Type.h:516
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition: Type.h:643
void setAddressSpace(LangAS space)
Definition: Type.h:584
bool hasVolatile() const
Definition: Type.h:460
ObjCLifetime getObjCLifetime() const
Definition: Type.h:538
void addVolatile()
Definition: Type.h:463
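A sketch of manipulating a Qualifiers set directly (the address space used here is purely illustrative):

  clang::Qualifiers Q;
  Q.addCVRQualifiers(clang::Qualifiers::Const | clang::Qualifiers::Volatile);
  bool ConstVolatile = Q.hasConst() && Q.hasVolatile(); // true
  Q.setAddressSpace(clang::LangAS::opencl_global);
  (void)ConstVolatile;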
Represents a struct/union/class.
Definition: Decl.h:4162
field_range fields() const
Definition: Decl.h:4376
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4361
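A sketch of iterating a record's fields through its defining declaration:

  void listFields(const clang::RecordDecl *RD) {
    if (const clang::RecordDecl *Def = RD->getDefinition())
      for (const clang::FieldDecl *FD : Def->fields())
        llvm::errs() << FD->getName() << (FD->isBitField() ? " (bit-field)\n" : "\n");
  }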
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6077
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:203
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1380
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:345
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
bool isUnion() const
Definition: Decl.h:3784
Exposes information about the current target.
Definition: TargetInfo.h:220
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1262
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1330
const Type * getTypeForDecl() const
Definition: Decl.h:3409
The type-property cache.
Definition: Type.cpp:4501
The base class of the type hierarchy.
Definition: Type.h:1828
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1916
bool isBlockPointerType() const
Definition: Type.h:8205
bool isVoidType() const
Definition: Type.h:8515
bool isBooleanType() const
Definition: Type.h:8643
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition: Type.cpp:2201
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition: Type.cpp:1933
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2180
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition: Type.h:8814
bool isConstantArrayType() const
Definition: Type.h:8267
bool isArrayType() const
Definition: Type.h:8263
bool isFunctionPointerType() const
Definition: Type.h:8231
bool isCountAttributedType() const
Definition: Type.cpp:727
bool isArithmeticType() const
Definition: Type.cpp:2315
bool isConstantMatrixType() const
Definition: Type.h:8325
bool isPointerType() const
Definition: Type.h:8191
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8555
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8805
bool isReferenceType() const
Definition: Type.h:8209
bool isVariableArrayType() const
Definition: Type.h:8275
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
bool isExtVectorBoolType() const
Definition: Type.h:8311
bool isBitIntType() const
Definition: Type.h:8429
bool isAnyComplexType() const
Definition: Type.h:8299
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition: Type.h:8686
bool isAtomicType() const
Definition: Type.h:8346
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2724
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2396
bool isFunctionType() const
Definition: Type.h:8187
bool isObjCObjectPointerType() const
Definition: Type.h:8333
bool isVectorType() const
Definition: Type.h:8303
bool isFloatingType() const
Definition: Type.cpp:2283
bool isSubscriptableVectorType() const
Definition: Type.h:8317
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:8736
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition: Type.cpp:638
bool isRecordType() const
Definition: Type.h:8291
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1920
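A sketch of drilling into the type hierarchy with getAs<> and the predicate helpers, with QT a QualType:

  if (const auto *PT = QT->getAs<clang::PointerType>()) {
    clang::QualType Pointee = PT->getPointeeType();
    if (Pointee->isRecordType()) {
      const clang::RecordDecl *RD = Pointee->getAsRecordDecl();
      (void)RD;
    }
  }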
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2232
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4750
Represent the declaration of a variable (in which case it is an lvalue) or a function (in which case it ...
Definition: Decl.h:671
QualType getType() const
Definition: Decl.h:682
QualType getType() const
Definition: Value.cpp:234
Represents a variable declaration or definition.
Definition: Decl.h:882
TLSKind getTLSKind() const
Definition: Decl.cpp:2157
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition: Decl.cpp:2355
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition: Decl.h:1135
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition: Decl.h:908
@ TLS_None
Not a TLS variable.
Definition: Decl.h:902
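A sketch distinguishing local and thread-local variables, with D a const Decl*:

  if (const auto *VD = llvm::dyn_cast<clang::VarDecl>(D)) {
    bool Local  = VD->hasLocalStorage();                           // non-static local?
    bool DynTLS = VD->getTLSKind() == clang::VarDecl::TLS_Dynamic; // dynamically initialized TLS?
    (void)Local; (void)DynTLS;
  }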
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3808
Represents a GCC generic vector type.
Definition: Type.h:4034
unsigned getNumElements() const
Definition: Type.h:4049
#define INT_MIN
Definition: limits.h:55
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
@ ARCImpreciseLifetime
Definition: CGValue.h:136
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition: CGValue.h:159
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< FunctionType > functionType
Matches FunctionType nodes.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2390
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2353
bool IsNonNull(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2378
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1693
bool Cast(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2126
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition: Specifiers.h:154
@ SC_Register
Definition: Specifiers.h:257
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition: Specifiers.h:327
@ SD_Thread
Thread storage duration.
Definition: Specifiers.h:330
@ SD_Static
Static storage duration.
Definition: Specifiers.h:331
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition: Specifiers.h:328
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:329
@ SD_Dynamic
Dynamic storage duration.
Definition: Specifiers.h:332
@ Result
The result type of a method or function.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
llvm::cl::opt< bool > ClSanitizeGuardChecks
const FunctionProtoType * T
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:86
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
@ Other
Other implicit parameter.
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition: Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition: Specifiers.h:180
unsigned long uint64_t
unsigned int uint32_t
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
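Not clang API, but a worked illustration of how the Offset/Size/IsSigned fields translate into the usual shift-and-mask access on a loaded storage unit:

  #include <cstdint>

  // Extract a bit-field of 'Size' bits starting at bit 'Offset' of 'Storage'.
  static int64_t extractBitField(uint64_t Storage, unsigned Offset,
                                 unsigned Size, bool IsSigned) {
    uint64_t Mask = (Size >= 64) ? ~0ull : ((1ull << Size) - 1);
    uint64_t Raw  = (Storage >> Offset) & Mask;
    if (IsSigned && Size < 64 && (Raw & (1ull << (Size - 1))))
      return (int64_t)(Raw - (1ull << Size)); // manual sign extension
    return (int64_t)Raw;
  }

For example, extractBitField(0xF8, 3, 5, true) reads the 5-bit field stored in bits 3..7 and yields -1.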
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
Definition: CodeGenTBAA.h:105
uint64_t Offset
Offset - The byte offset of the final access within the base one.
Definition: CodeGenTBAA.h:109
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
Definition: CodeGenTBAA.h:112
llvm::MDNode * BaseType
BaseType - The base/leading access type.
Definition: CodeGenTBAA.h:101
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition: Expr.h:609
PointerAuthSchema FunctionPointers
The ABI for C function pointers.
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:182
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:169
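A sketch of toggling and querying a single sanitizer in a SanitizerSet (the particular check chosen is illustrative):

  clang::SanitizerSet SanOpts;
  SanOpts.set(clang::SanitizerKind::Null, true);
  if (SanOpts.has(clang::SanitizerKind::Null)) {
    // ...emit the corresponding UBSan guard.
  }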
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition: Expr.h:66