CGExpr.cpp
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCUDARuntime.h"
14#include "CGCXXABI.h"
15#include "CGCall.h"
16#include "CGCleanup.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "CGOpenMPRuntime.h"
20#include "CGRecordLayout.h"
21#include "CodeGenFunction.h"
22#include "CodeGenModule.h"
23#include "ConstantEmitter.h"
24#include "TargetInfo.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/NSAPI.h"
33#include "llvm/ADT/Hashing.h"
34#include "llvm/ADT/STLExtras.h"
35#include "llvm/ADT/StringExtras.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/Intrinsics.h"
38#include "llvm/IR/IntrinsicsWebAssembly.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/MDBuilder.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/Passes/OptimizationLevel.h"
43#include "llvm/Support/ConvertUTF.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/Path.h"
46#include "llvm/Support/SaveAndRestore.h"
47#include "llvm/Support/xxhash.h"
48#include "llvm/Transforms/Utils/SanitizerStats.h"
49
50#include <optional>
51#include <string>
52
53using namespace clang;
54using namespace CodeGen;
55
56// Experiment to make sanitizers easier to debug
57static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
58 "ubsan-unique-traps", llvm::cl::Optional,
59 llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));
60
61// TODO: Introduce frontend options to enable this per sanitizer, similar to
62// `-fsanitize-trap`.
63static llvm::cl::opt<bool> ClSanitizeGuardChecks(
64 "ubsan-guard-checks", llvm::cl::Optional,
65 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
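// Illustrative usage (assumption: these cl::opt flags are reached through the
// generic -mllvm escape hatch rather than a dedicated driver flag), e.g.:
//   clang -O2 -fsanitize=undefined -fsanitize-trap=undefined \
//         -mllvm -ubsan-unique-traps -c foo.c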
66
67//===--------------------------------------------------------------------===//
68// Miscellaneous Helper Methods
69//===--------------------------------------------------------------------===//
70
71/// CreateTempAlloca - This creates an alloca and inserts it into the entry
72/// block.
73RawAddress
74CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
75 const Twine &Name,
76 llvm::Value *ArraySize) {
77 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
78 Alloca->setAlignment(Align.getAsAlign());
79 return RawAddress(Alloca, Ty, Align, KnownNonNull);
80}
81
82/// CreateTempAlloca - This creates an alloca and inserts it into the entry
83/// block. The alloca is cast to the default address space if necessary.
84RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
85 const Twine &Name,
86 llvm::Value *ArraySize,
87 RawAddress *AllocaAddr) {
88 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
89 if (AllocaAddr)
90 *AllocaAddr = Alloca;
91 llvm::Value *V = Alloca.getPointer();
92 // Alloca always returns a pointer in alloca address space, which may
93 // be different from the type defined by the language. For example,
94 // in C++ the auto variables are in the default address space. Therefore
95 // cast alloca to the default address space when necessary.
96 if (getASTAllocaAddressSpace() != LangAS::Default) {
97 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
98 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
99 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
100 // otherwise alloca is inserted at the current insertion point of the
101 // builder.
102 if (!ArraySize)
103 Builder.SetInsertPoint(getPostAllocaInsertPoint());
104 V = getTargetHooks().performAddrSpaceCast(
105 *this, V, getASTAllocaAddressSpace(), LangAS::Default,
106 Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
107 }
108
109 return RawAddress(V, Ty, Align, KnownNonNull);
110}
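// Rough illustration of the address-space handling above (assuming a target
// such as AMDGPU whose alloca address space differs from the default):
//   %tmp        = alloca i32, align 4, addrspace(5)
//   %tmp.ascast = addrspacecast ptr addrspace(5) %tmp to ptr
// The addrspacecast result is what the returned RawAddress exposes to the
// rest of IR generation.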
111
112/// CreateTempAlloca - This creates an alloca and inserts it into the entry
113/// block if \p ArraySize is nullptr, otherwise inserts it at the current
114/// insertion point of the builder.
115llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
116 const Twine &Name,
117 llvm::Value *ArraySize) {
118 llvm::AllocaInst *Alloca;
119 if (ArraySize)
120 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
121 else
122 Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
123 ArraySize, Name, AllocaInsertPt);
124 if (Allocas) {
125 Allocas->Add(Alloca);
126 }
127 return Alloca;
128}
129
130/// CreateDefaultAlignTempAlloca - This creates an alloca with the
131/// default alignment of the corresponding LLVM type, which is *not*
132/// guaranteed to be related in any way to the expected alignment of
133/// an AST type that might have been lowered to Ty.
135 const Twine &Name) {
136 CharUnits Align =
137 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
138 return CreateTempAlloca(Ty, Align, Name);
139}
140
143 return CreateTempAlloca(ConvertType(Ty), Align, Name);
144}
145
147 RawAddress *Alloca) {
148 // FIXME: Should we prefer the preferred type alignment here?
149 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
150}
151
153 const Twine &Name,
154 RawAddress *Alloca) {
156 /*ArraySize=*/nullptr, Alloca);
157
158 if (Ty->isConstantMatrixType()) {
159 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
160 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
161 ArrayTy->getNumElements());
162
163 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
165 }
166 return Result;
167}
168
170 CharUnits Align,
171 const Twine &Name) {
172 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
173}
174
176 const Twine &Name) {
177 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
178 Name);
179}
180
181/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
182/// expression and compare the result against zero, returning an Int1Ty value.
183llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
184 PGO.setCurrentStmt(E);
185 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
186 llvm::Value *MemPtr = EmitScalarExpr(E);
187 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
188 }
189
190 QualType BoolTy = getContext().BoolTy;
192 CGFPOptionsRAII FPOptsRAII(*this, E);
193 if (!E->getType()->isAnyComplexType())
194 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
195
197 Loc);
198}
199
200/// EmitIgnoredExpr - Emit code to compute the specified expression,
201/// ignoring the result.
203 if (E->isPRValue())
204 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
205
206 // If this is a bitfield-resulting conditional operator, we can special-case
207 // emitting it. The normal 'EmitLValue' version of this is particularly
208 // difficult to codegen for, since creating a single "LValue" for two
209 // differently sized arguments here is not particularly doable.
210 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
212 if (CondOp->getObjectKind() == OK_BitField)
213 return EmitIgnoredConditionalOperator(CondOp);
214 }
215
216 // Just emit it as an l-value and drop the result.
217 EmitLValue(E);
218}
219
220/// EmitAnyExpr - Emit code to compute the specified expression which
221/// can have any type. The result is returned as an RValue struct.
222/// If this is an aggregate expression, AggSlot indicates where the
223/// result should be returned.
225 AggValueSlot aggSlot,
226 bool ignoreResult) {
227 switch (getEvaluationKind(E->getType())) {
228 case TEK_Scalar:
229 return RValue::get(EmitScalarExpr(E, ignoreResult));
230 case TEK_Complex:
231 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
232 case TEK_Aggregate:
233 if (!ignoreResult && aggSlot.isIgnored())
234 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
235 EmitAggExpr(E, aggSlot);
236 return aggSlot.asRValue();
237 }
238 llvm_unreachable("bad evaluation kind");
239}
240
241/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
242/// always be accessible even if no aggregate location is provided.
245
247 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
248 return EmitAnyExpr(E, AggSlot);
249}
250
251/// EmitAnyExprToMem - Evaluate an expression into a given memory
252/// location.
254 Address Location,
255 Qualifiers Quals,
256 bool IsInit) {
257 // FIXME: This function should take an LValue as an argument.
258 switch (getEvaluationKind(E->getType())) {
259 case TEK_Complex:
261 /*isInit*/ false);
262 return;
263
264 case TEK_Aggregate: {
265 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
270 return;
271 }
272
273 case TEK_Scalar: {
274 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
275 LValue LV = MakeAddrLValue(Location, E->getType());
277 return;
278 }
279 }
280 llvm_unreachable("bad evaluation kind");
281}
282
283static void
285 const Expr *E, Address ReferenceTemporary) {
286 // Objective-C++ ARC:
287 // If we are binding a reference to a temporary that has ownership, we
288 // need to perform retain/release operations on the temporary.
289 //
290 // FIXME: This should be looking at E, not M.
291 if (auto Lifetime = M->getType().getObjCLifetime()) {
292 switch (Lifetime) {
295 // Carry on to normal cleanup handling.
296 break;
297
299 // Nothing to do; cleaned up by an autorelease pool.
300 return;
301
304 switch (StorageDuration Duration = M->getStorageDuration()) {
305 case SD_Static:
306 // Note: we intentionally do not register a cleanup to release
307 // the object on program termination.
308 return;
309
310 case SD_Thread:
311 // FIXME: We should probably register a cleanup in this case.
312 return;
313
314 case SD_Automatic:
318 if (Lifetime == Qualifiers::OCL_Strong) {
319 const ValueDecl *VD = M->getExtendingDecl();
320 bool Precise =
321 VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
325 } else {
326 // __weak objects always get EH cleanups; otherwise, exceptions
327 // could cause really nasty crashes instead of mere leaks.
330 }
331 if (Duration == SD_FullExpression)
332 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
333 M->getType(), *Destroy,
335 else
336 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
337 M->getType(),
338 *Destroy, CleanupKind & EHCleanup);
339 return;
340
341 case SD_Dynamic:
342 llvm_unreachable("temporary cannot have dynamic storage duration");
343 }
344 llvm_unreachable("unknown storage duration");
345 }
346 }
347
348 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
349 if (const RecordType *RT =
351 // Get the destructor for the reference temporary.
352 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
353 if (!ClassDecl->hasTrivialDestructor())
354 ReferenceTemporaryDtor = ClassDecl->getDestructor();
355 }
356
357 if (!ReferenceTemporaryDtor)
358 return;
359
360 // Call the destructor for the temporary.
361 switch (M->getStorageDuration()) {
362 case SD_Static:
363 case SD_Thread: {
364 llvm::FunctionCallee CleanupFn;
365 llvm::Constant *CleanupArg;
366 if (E->getType()->isArrayType()) {
368 ReferenceTemporary, E->getType(),
370 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
371 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
372 } else {
373 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
374 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
375 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
376 }
378 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
379 break;
380 }
381
383 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
385 CGF.getLangOpts().Exceptions);
386 break;
387
388 case SD_Automatic:
390 ReferenceTemporary, E->getType(),
392 CGF.getLangOpts().Exceptions);
393 break;
394
395 case SD_Dynamic:
396 llvm_unreachable("temporary cannot have dynamic storage duration");
397 }
398}
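// Sketch of the storage-duration cases handled above (assuming a class S with
// a non-trivial destructor and a function g(const S &)):
//   g(S());                          // SD_FullExpression: destroyed at the
//                                    // end of the enclosing full-expression
//   const S &r = S();                // SD_Automatic: lifetime extended to
//                                    // the enclosing scope
//   static const S &sr = S();        // SD_Static: destroyed via atexit-style
//                                    // registration
//   thread_local const S &tr = S();  // SD_Thread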
399
402 const Expr *Inner,
403 RawAddress *Alloca = nullptr) {
404 auto &TCG = CGF.getTargetHooks();
405 switch (M->getStorageDuration()) {
407 case SD_Automatic: {
408 // If we have a constant temporary array or record try to promote it into a
409 // constant global under the same rules a normal constant would've been
410 // promoted. This is easier on the optimizer and generally emits fewer
411 // instructions.
412 QualType Ty = Inner->getType();
413 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
414 (Ty->isArrayType() || Ty->isRecordType()) &&
415 Ty.isConstantStorage(CGF.getContext(), true, false))
416 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
417 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
418 auto *GV = new llvm::GlobalVariable(
419 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
420 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
421 llvm::GlobalValue::NotThreadLocal,
423 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
424 GV->setAlignment(alignment.getAsAlign());
425 llvm::Constant *C = GV;
426 if (AS != LangAS::Default)
427 C = TCG.performAddrSpaceCast(
428 CGF.CGM, GV, AS, LangAS::Default,
429 GV->getValueType()->getPointerTo(
431 // FIXME: Should we put the new global into a COMDAT?
432 return RawAddress(C, GV->getValueType(), alignment);
433 }
434 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
435 }
436 case SD_Thread:
437 case SD_Static:
438 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
439
440 case SD_Dynamic:
441 llvm_unreachable("temporary can't have dynamic storage duration");
442 }
443 llvm_unreachable("unknown storage duration");
444}
445
446/// Helper method to check if the underlying ABI is AAPCS
447static bool isAAPCS(const TargetInfo &TargetInfo) {
448 return TargetInfo.getABI().starts_with("aapcs");
449}
450
453 const Expr *E = M->getSubExpr();
454
455 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
456 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
457 "Reference should never be pseudo-strong!");
458
459 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
460 // as that will cause the lifetime adjustment to be lost for ARC
461 auto ownership = M->getType().getObjCLifetime();
462 if (ownership != Qualifiers::OCL_None &&
463 ownership != Qualifiers::OCL_ExplicitNone) {
465 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
466 llvm::Type *Ty = ConvertTypeForMem(E->getType());
467 Object = Object.withElementType(Ty);
468
469 // createReferenceTemporary will promote the temporary to a global with a
470 // constant initializer if it can. It can only do this to a value of
471 // ARC-manageable type if the value is global and therefore "immune" to
472 // ref-counting operations. Therefore we have no need to emit either a
473 // dynamic initialization or a cleanup and we can just return the address
474 // of the temporary.
475 if (Var->hasInitializer())
476 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
477
478 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
479 }
480 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
482
483 switch (getEvaluationKind(E->getType())) {
484 default: llvm_unreachable("expected scalar or aggregate expression");
485 case TEK_Scalar:
486 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
487 break;
488 case TEK_Aggregate: {
490 E->getType().getQualifiers(),
495 break;
496 }
497 }
498
499 pushTemporaryCleanup(*this, M, E, Object);
500 return RefTempDst;
501 }
502
505 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
506
507 for (const auto &Ignored : CommaLHSs)
508 EmitIgnoredExpr(Ignored);
509
510 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
511 if (opaque->getType()->isRecordType()) {
512 assert(Adjustments.empty());
513 return EmitOpaqueValueLValue(opaque);
514 }
515 }
516
517 // Create and initialize the reference temporary.
518 RawAddress Alloca = Address::invalid();
519 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
520 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
521 Object.getPointer()->stripPointerCasts())) {
522 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
523 Object = Object.withElementType(TemporaryType);
524 // If the temporary is a global and has a constant initializer or is a
525 // constant temporary that we promoted to a global, we may have already
526 // initialized it.
527 if (!Var->hasInitializer()) {
528 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
529 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
530 }
531 } else {
532 switch (M->getStorageDuration()) {
533 case SD_Automatic:
534 if (auto *Size = EmitLifetimeStart(
535 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
536 Alloca.getPointer())) {
537 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
538 Alloca, Size);
539 }
540 break;
541
542 case SD_FullExpression: {
543 if (!ShouldEmitLifetimeMarkers)
544 break;
545
546 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
547 // marker. Instead, start the lifetime of a conditional temporary earlier
548 // so that it's unconditional. Don't do this with sanitizers which need
549 // more precise lifetime marks. However, when inside an "await.suspend"
550 // block, we should always avoid conditional cleanups, because they create
551 // a boolean marker that lives across await_suspend, which can destroy the
552 // coroutine frame.
553 ConditionalEvaluation *OldConditional = nullptr;
554 CGBuilderTy::InsertPoint OldIP;
556 ((!SanOpts.has(SanitizerKind::HWAddress) &&
557 !SanOpts.has(SanitizerKind::Memory) &&
558 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
559 inSuspendBlock())) {
560 OldConditional = OutermostConditional;
561 OutermostConditional = nullptr;
562
563 OldIP = Builder.saveIP();
564 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
565 Builder.restoreIP(CGBuilderTy::InsertPoint(
566 Block, llvm::BasicBlock::iterator(Block->back())));
567 }
568
569 if (auto *Size = EmitLifetimeStart(
570 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
571 Alloca.getPointer())) {
572 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
573 Size);
574 }
575
576 if (OldConditional) {
577 OutermostConditional = OldConditional;
578 Builder.restoreIP(OldIP);
579 }
580 break;
581 }
582
583 default:
584 break;
585 }
586 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
587 }
588 pushTemporaryCleanup(*this, M, E, Object);
589
590 // Perform derived-to-base casts and/or field accesses, to get from the
591 // temporary object we created (and, potentially, for which we extended
592 // the lifetime) to the subobject we're binding the reference to.
593 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
594 switch (Adjustment.Kind) {
596 Object =
597 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
598 Adjustment.DerivedToBase.BasePath->path_begin(),
599 Adjustment.DerivedToBase.BasePath->path_end(),
600 /*NullCheckValue=*/ false, E->getExprLoc());
601 break;
602
605 LV = EmitLValueForField(LV, Adjustment.Field);
606 assert(LV.isSimple() &&
607 "materialized temporary field is not a simple lvalue");
608 Object = LV.getAddress(*this);
609 break;
610 }
611
613 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
615 Adjustment.Ptr.MPT);
616 break;
617 }
618 }
619 }
620
621 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
622}
623
624RValue
625CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
626 // Emit the expression as an lvalue.
627 LValue LV = EmitLValue(E);
628 assert(LV.isSimple());
629 llvm::Value *Value = LV.getPointer(*this);
630
632 // C++11 [dcl.ref]p5 (as amended by core issue 453):
633 // If a glvalue to which a reference is directly bound designates neither
634 // an existing object or function of an appropriate type nor a region of
635 // storage of suitable size and alignment to contain an object of the
636 // reference's type, the behavior is undefined.
637 QualType Ty = E->getType();
639 }
640
641 return RValue::get(Value);
642}
643
644
645/// getAccessedFieldNo - Given an encoded value and a result number, return the
646/// input field number being accessed.
647unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
648 const llvm::Constant *Elts) {
649 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
650 ->getZExtValue();
651}
652
653/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
654static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
655 llvm::Value *High) {
656 llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
657 llvm::Value *K47 = Builder.getInt64(47);
658 llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
659 llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
660 llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
661 llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
662 return Builder.CreateMul(B1, KMul);
663}
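// The IR built above mirrors hash_16_bytes() from llvm/ADT/Hashing.h,
// i.e. (sketch):
//   a = (lo ^ hi) * kMul;  a ^= (a >> 47);
//   b = (hi ^ a) * kMul;   b ^= (b >> 47);
//   return b * kMul;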
664
665bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
666 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
667 TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
668}
669
670bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
672 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
673 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
676}
677
679 return SanOpts.has(SanitizerKind::Null) ||
680 SanOpts.has(SanitizerKind::Alignment) ||
681 SanOpts.has(SanitizerKind::ObjectSize) ||
682 SanOpts.has(SanitizerKind::Vptr);
683}
684
685void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
686 llvm::Value *Ptr, QualType Ty,
687 CharUnits Alignment,
688 SanitizerSet SkippedChecks,
689 llvm::Value *ArraySize) {
690 if (!sanitizePerformTypeCheck())
691 return;
692
693 // Don't check pointers outside the default address space. The null check
694 // isn't correct, the object-size check isn't supported by LLVM, and we can't
695 // communicate the addresses to the runtime handler for the vptr check.
696 if (Ptr->getType()->getPointerAddressSpace())
697 return;
698
699 // Don't check pointers to volatile data. The behavior here is implementation-
700 // defined.
701 if (Ty.isVolatileQualified())
702 return;
703
704 SanitizerScope SanScope(this);
705
706 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
707 llvm::BasicBlock *Done = nullptr;
708
709 // Quickly determine whether we have a pointer to an alloca. It's possible
710 // to skip null checks, and some alignment checks, for these pointers. This
711 // can reduce compile-time significantly.
712 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
713
714 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
715 llvm::Value *IsNonNull = nullptr;
716 bool IsGuaranteedNonNull =
717 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
718 bool AllowNullPointers = isNullPointerAllowed(TCK);
719 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
720 !IsGuaranteedNonNull) {
721 // The glvalue must not be an empty glvalue.
722 IsNonNull = Builder.CreateIsNotNull(Ptr);
723
724 // The IR builder can constant-fold the null check if the pointer points to
725 // a constant.
726 IsGuaranteedNonNull = IsNonNull == True;
727
728 // Skip the null check if the pointer is known to be non-null.
729 if (!IsGuaranteedNonNull) {
730 if (AllowNullPointers) {
731 // When performing pointer casts, it's OK if the value is null.
732 // Skip the remaining checks in that case.
733 Done = createBasicBlock("null");
734 llvm::BasicBlock *Rest = createBasicBlock("not.null");
735 Builder.CreateCondBr(IsNonNull, Rest, Done);
736 EmitBlock(Rest);
737 } else {
738 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
739 }
740 }
741 }
742
743 if (SanOpts.has(SanitizerKind::ObjectSize) &&
744 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
745 !Ty->isIncompleteType()) {
747 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
748 if (ArraySize)
749 Size = Builder.CreateMul(Size, ArraySize);
750
751 // Degenerate case: new X[0] does not need an objectsize check.
752 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
753 if (!ConstantSize || !ConstantSize->isNullValue()) {
754 // The glvalue must refer to a large enough storage region.
755 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
756 // to check this.
757 // FIXME: Get object address space
758 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
759 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
760 llvm::Value *Min = Builder.getFalse();
761 llvm::Value *NullIsUnknown = Builder.getFalse();
762 llvm::Value *Dynamic = Builder.getFalse();
763 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
764 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
765 Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
766 }
767 }
768
769 llvm::MaybeAlign AlignVal;
770 llvm::Value *PtrAsInt = nullptr;
771
772 if (SanOpts.has(SanitizerKind::Alignment) &&
773 !SkippedChecks.has(SanitizerKind::Alignment)) {
774 AlignVal = Alignment.getAsMaybeAlign();
775 if (!Ty->isIncompleteType() && !AlignVal)
776 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
777 /*ForPointeeType=*/true)
779
780 // The glvalue must be suitably aligned.
781 if (AlignVal && *AlignVal > llvm::Align(1) &&
782 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
783 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
784 llvm::Value *Align = Builder.CreateAnd(
785 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
786 llvm::Value *Aligned =
787 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
788 if (Aligned != True)
789 Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
790 }
791 }
792
793 if (Checks.size() > 0) {
794 llvm::Constant *StaticData[] = {
796 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
797 llvm::ConstantInt::get(Int8Ty, TCK)};
798 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
799 PtrAsInt ? PtrAsInt : Ptr);
800 }
801
802 // If possible, check that the vptr indicates that there is a subobject of
803 // type Ty at offset zero within this object.
804 //
805 // C++11 [basic.life]p5,6:
806 // [For storage which does not refer to an object within its lifetime]
807 // The program has undefined behavior if:
808 // -- the [pointer or glvalue] is used to access a non-static data member
809 // or call a non-static member function
810 if (SanOpts.has(SanitizerKind::Vptr) &&
811 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
812 // Ensure that the pointer is non-null before loading it. If there is no
813 // compile-time guarantee, reuse the run-time null check or emit a new one.
814 if (!IsGuaranteedNonNull) {
815 if (!IsNonNull)
816 IsNonNull = Builder.CreateIsNotNull(Ptr);
817 if (!Done)
818 Done = createBasicBlock("vptr.null");
819 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
820 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
821 EmitBlock(VptrNotNull);
822 }
823
824 // Compute a hash of the mangled name of the type.
825 //
826 // FIXME: This is not guaranteed to be deterministic! Move to a
827 // fingerprinting mechanism once LLVM provides one. For the time
828 // being the implementation happens to be deterministic.
829 SmallString<64> MangledName;
830 llvm::raw_svector_ostream Out(MangledName);
832 Out);
833
834 // Contained in NoSanitizeList based on the mangled type.
835 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
836 Out.str())) {
837 llvm::hash_code TypeHash = hash_value(Out.str());
838
839 // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
840 llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
841 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
842 llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
843 llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
844
845 llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
846 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
847
848 // Look the hash up in our cache.
849 const int CacheSize = 128;
850 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
851 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
852 "__ubsan_vptr_type_cache");
853 llvm::Value *Slot = Builder.CreateAnd(Hash,
854 llvm::ConstantInt::get(IntPtrTy,
855 CacheSize-1));
856 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
857 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
858 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
860
861 // If the hash isn't in the cache, call a runtime handler to perform the
862 // hard work of checking whether the vptr is for an object of the right
863 // type. This will either fill in the cache and return, or produce a
864 // diagnostic.
865 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
866 llvm::Constant *StaticData[] = {
870 llvm::ConstantInt::get(Int8Ty, TCK)
871 };
872 llvm::Value *DynamicData[] = { Ptr, Hash };
873 EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
874 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
875 DynamicData);
876 }
877 }
878
879 if (Done) {
880 Builder.CreateBr(Done);
881 EmitBlock(Done);
882 }
883}
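// Illustration of the -fsanitize=vptr path above (hypothetical user code, not
// from this file):
//   struct A { virtual ~A(); };
//   struct B : A { int x; };
//   void f(A *a) { static_cast<B *>(a)->x = 0; }
// The downcast is checked by hashing the mangled type name together with the
// loaded vptr and consulting __ubsan_vptr_type_cache before calling the
// DynamicTypeCacheMiss handler.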
884
885llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
886 QualType EltTy) {
888 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
889 if (!EltSize)
890 return nullptr;
891
892 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
893 if (!ArrayDeclRef)
894 return nullptr;
895
896 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
897 if (!ParamDecl)
898 return nullptr;
899
900 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
901 if (!POSAttr)
902 return nullptr;
903
904 // Don't load the size if it's a lower bound.
905 int POSType = POSAttr->getType();
906 if (POSType != 0 && POSType != 1)
907 return nullptr;
908
909 // Find the implicit size parameter.
910 auto PassedSizeIt = SizeArguments.find(ParamDecl);
911 if (PassedSizeIt == SizeArguments.end())
912 return nullptr;
913
914 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
915 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
916 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
917 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
918 C.getSizeType(), E->getExprLoc());
919 llvm::Value *SizeOfElement =
920 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
921 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
922}
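// Illustration (hypothetical declaration): for
//   void f(int *p __attribute__((pass_object_size(0))));
// the caller passes an implicit byte count; the helper above loads it and
// divides by sizeof(int) to recover an element count usable as an array
// bound.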
923
924/// If Base is known to point to the start of an array, return the length of
925/// that array. Return 0 if the length cannot be determined.
926static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
927 const Expr *Base,
928 QualType &IndexedType,
930 StrictFlexArraysLevel) {
931 // For the vector indexing extension, the bound is the number of elements.
932 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
933 IndexedType = Base->getType();
934 return CGF.Builder.getInt32(VT->getNumElements());
935 }
936
937 Base = Base->IgnoreParens();
938
939 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
940 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
941 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
942 StrictFlexArraysLevel)) {
943 CodeGenFunction::SanitizerScope SanScope(&CGF);
944
945 IndexedType = CE->getSubExpr()->getType();
946 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
947 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
948 return CGF.Builder.getInt(CAT->getSize());
949
950 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
951 return CGF.getVLASize(VAT).NumElts;
952 // Ignore pass_object_size here. It's not applicable on decayed pointers.
953 }
954 }
955
956 CodeGenFunction::SanitizerScope SanScope(&CGF);
957
958 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
959 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
960 IndexedType = Base->getType();
961 return POS;
962 }
963
964 return nullptr;
965}
966
967namespace {
968
969/// \p StructAccessBase returns the base \p Expr of a field access. It returns
970/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
971///
972/// p in p->a.b.c
973///
974/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
975/// looking for:
976///
977/// struct s {
978/// struct s *ptr;
979/// int count;
980/// char array[] __attribute__((counted_by(count)));
981/// };
982///
983/// If we have an expression like \p p->ptr->array[index], we want the
984/// \p MemberExpr for \p p->ptr instead of \p p.
985class StructAccessBase
986 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
987 const RecordDecl *ExpectedRD;
988
989 bool IsExpectedRecordDecl(const Expr *E) const {
990 QualType Ty = E->getType();
991 if (Ty->isPointerType())
992 Ty = Ty->getPointeeType();
993 return ExpectedRD == Ty->getAsRecordDecl();
994 }
995
996public:
997 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
998
999 //===--------------------------------------------------------------------===//
1000 // Visitor Methods
1001 //===--------------------------------------------------------------------===//
1002
1003 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1004 // horrors like this:
1005 //
1006 // struct S {
1007 // int x, y;
1008 // int blah[] __attribute__((counted_by(x)));
1009 // } s;
1010 //
1011 // int foo(int index, int val) {
1012 // int (S::*IHatePMDs)[] = &S::blah;
1013 // (s.*IHatePMDs)[index] = val;
1014 // }
1015
1016 const Expr *Visit(const Expr *E) {
1018 }
1019
1020 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1021
1022 // These are the types we expect to return (in order of most to least
1023 // likely):
1024 //
1025 // 1. DeclRefExpr - This is the expression for the base of the structure.
1026 // It's exactly what we want to build an access to the \p counted_by
1027 // field.
1028 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1029// as the flexible array member's lexical enclosing \p RecordDecl. This
1030 // allows us to catch things like: "p->p->array"
1031 // 3. CompoundLiteralExpr - This is for people who create something
1032 // heretical like (struct foo has a flexible array member):
1033 //
1034 // (struct foo){ 1, 2 }.blah[idx];
1035 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1036 return IsExpectedRecordDecl(E) ? E : nullptr;
1037 }
1038 const Expr *VisitMemberExpr(const MemberExpr *E) {
1039 if (IsExpectedRecordDecl(E) && E->isArrow())
1040 return E;
1041 const Expr *Res = Visit(E->getBase());
1042 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1043 }
1044 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1045 return IsExpectedRecordDecl(E) ? E : nullptr;
1046 }
1047 const Expr *VisitCallExpr(const CallExpr *E) {
1048 return IsExpectedRecordDecl(E) ? E : nullptr;
1049 }
1050
1051 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1052 if (IsExpectedRecordDecl(E))
1053 return E;
1054 return Visit(E->getBase());
1055 }
1056 const Expr *VisitCastExpr(const CastExpr *E) {
1057 return Visit(E->getSubExpr());
1058 }
1059 const Expr *VisitParenExpr(const ParenExpr *E) {
1060 return Visit(E->getSubExpr());
1061 }
1062 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1063 return Visit(E->getSubExpr());
1064 }
1065 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1066 return Visit(E->getSubExpr());
1067 }
1068};
1069
1070} // end anonymous namespace
1071
1074
1076 const FieldDecl *FD, RecIndicesTy &Indices) {
1077 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1078 int64_t FieldNo = -1;
1079 for (const Decl *D : RD->decls()) {
1080 if (const auto *Field = dyn_cast<FieldDecl>(D)) {
1081 FieldNo = Layout.getLLVMFieldNo(Field);
1082 if (FD == Field) {
1083 Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
1084 return true;
1085 }
1086 }
1087
1088 if (const auto *Record = dyn_cast<RecordDecl>(D)) {
1089 ++FieldNo;
1090 if (getGEPIndicesToField(CGF, Record, FD, Indices)) {
1091 if (RD->isUnion())
1092 FieldNo = 0;
1093 Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
1094 return true;
1095 }
1096 }
1097 }
1098
1099 return false;
1100}
1101
1102/// This method is typically called in contexts where we can't generate
1103/// side-effects, like in __builtin_dynamic_object_size. When finding
1104/// expressions, only choose those that have either already been emitted or can
1105/// be loaded without side-effects.
1106///
1107/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1108/// within the top-level struct.
1109/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1111 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1112 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1113
1114 // Find the base struct expr (i.e. p in p->a.b.c.d).
1115 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1116 if (!StructBase || StructBase->HasSideEffects(getContext()))
1117 return nullptr;
1118
1119 llvm::Value *Res = nullptr;
1120 if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
1121 Res = EmitDeclRefLValue(DRE).getPointer(*this);
1122 Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
1123 getPointerAlign(), "dre.load");
1124 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
1125 LValue LV = EmitMemberExpr(ME);
1126 Address Addr = LV.getAddress(*this);
1127 Res = Addr.emitRawPointer(*this);
1128 } else if (StructBase->getType()->isPointerType()) {
1129 LValueBaseInfo BaseInfo;
1130 TBAAAccessInfo TBAAInfo;
1131 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1132 Res = Addr.emitRawPointer(*this);
1133 } else {
1134 return nullptr;
1135 }
1136
1137 llvm::Value *Zero = Builder.getInt32(0);
1138 RecIndicesTy Indices;
1139
1140 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1141
1142 for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
1144 ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
1145 {Zero, I->second}, "..counted_by.gep");
1146
1147 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
1148 getIntAlign(), "..counted_by.load");
1149}
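// Illustration (hypothetical struct): for
//   struct S { int count; int fam[] __attribute__((counted_by(count))); };
// an access such as p->fam[i] lets the code above GEP from the base struct
// expression down to 'count' and load it, e.g. for
// __builtin_dynamic_object_size or array-bounds checking.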
1150
1152 if (!FD)
1153 return nullptr;
1154
1155 const auto *CAT = FD->getType()->getAs<CountAttributedType>();
1156 if (!CAT)
1157 return nullptr;
1158
1159 const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
1160 const auto *CountDecl = CountDRE->getDecl();
1161 if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
1162 CountDecl = IFD->getAnonField();
1163
1164 return dyn_cast<FieldDecl>(CountDecl);
1165}
1166
1167void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1168 llvm::Value *Index, QualType IndexType,
1169 bool Accessed) {
1170 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1171 "should not be called unless adding bounds checks");
1172 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1173 getLangOpts().getStrictFlexArraysLevel();
1174 QualType IndexedType;
1175 llvm::Value *Bound =
1176 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1177
1178 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1179}
1180
1181void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1182 llvm::Value *Index,
1183 QualType IndexType,
1184 QualType IndexedType, bool Accessed) {
1185 if (!Bound)
1186 return;
1187
1188 SanitizerScope SanScope(this);
1189
1190 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1191 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1192 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1193
1194 llvm::Constant *StaticData[] = {
1196 EmitCheckTypeDescriptor(IndexedType),
1197 EmitCheckTypeDescriptor(IndexType)
1198 };
1199 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1200 : Builder.CreateICmpULE(IndexVal, BoundVal);
1201 EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
1202 SanitizerHandler::OutOfBounds, StaticData, Index);
1203}
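// Illustration: with -fsanitize=array-bounds, an access like
//   int a[4];  a[i] = 0;
// is guarded by the comparison built above, conceptually
//   if (!((size_t)i < 4)) __ubsan_handle_out_of_bounds(...);
// (ULE instead of ULT is used when only the address, not the element, is
// accessed, e.g. &a[i]).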
1204
1207 bool isInc, bool isPre) {
1208 ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1209
1210 llvm::Value *NextVal;
1211 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1212 uint64_t AmountVal = isInc ? 1 : -1;
1213 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1214
1215 // Add the inc/dec to the real part.
1216 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1217 } else {
1218 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1219 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1220 if (!isInc)
1221 FVal.changeSign();
1222 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1223
1224 // Add the inc/dec to the real part.
1225 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1226 }
1227
1228 ComplexPairTy IncVal(NextVal, InVal.second);
1229
1230 // Store the updated result through the lvalue.
1231 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1232 if (getLangOpts().OpenMP)
1234 E->getSubExpr());
1235
1236 // If this is a postinc, return the value read from memory, otherwise use the
1237 // updated value.
1238 return isPre ? IncVal : InVal;
1239}
1240
1242 CodeGenFunction *CGF) {
1243 // Bind VLAs in the cast type.
1244 if (CGF && E->getType()->isVariablyModifiedType())
1246
1247 if (CGDebugInfo *DI = getModuleDebugInfo())
1248 DI->EmitExplicitCastType(E->getType());
1249}
1250
1251//===----------------------------------------------------------------------===//
1252// LValue Expression Emission
1253//===----------------------------------------------------------------------===//
1254
1256 TBAAAccessInfo *TBAAInfo,
1257 KnownNonNull_t IsKnownNonNull,
1258 CodeGenFunction &CGF) {
1259 // We allow this with ObjC object pointers because of fragile ABIs.
1260 assert(E->getType()->isPointerType() ||
1262 E = E->IgnoreParens();
1263
1264 // Casts:
1265 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1266 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1267 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1268
1269 switch (CE->getCastKind()) {
1270 // Non-converting casts (but not C's implicit conversion from void*).
1271 case CK_BitCast:
1272 case CK_NoOp:
1273 case CK_AddressSpaceConversion:
1274 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1275 if (PtrTy->getPointeeType()->isVoidType())
1276 break;
1277
1278 LValueBaseInfo InnerBaseInfo;
1279 TBAAAccessInfo InnerTBAAInfo;
1281 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1282 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1283 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1284
1285 if (isa<ExplicitCastExpr>(CE)) {
1286 LValueBaseInfo TargetTypeBaseInfo;
1287 TBAAAccessInfo TargetTypeTBAAInfo;
1289 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1290 if (TBAAInfo)
1291 *TBAAInfo =
1292 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1293 // If the source l-value is opaque, honor the alignment of the
1294 // casted-to type.
1295 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1296 if (BaseInfo)
1297 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1298 Addr.setAlignment(Align);
1299 }
1300 }
1301
1302 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1303 CE->getCastKind() == CK_BitCast) {
1304 if (auto PT = E->getType()->getAs<PointerType>())
1305 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1306 /*MayBeNull=*/true,
1308 CE->getBeginLoc());
1309 }
1310
1311 llvm::Type *ElemTy =
1313 Addr = Addr.withElementType(ElemTy);
1314 if (CE->getCastKind() == CK_AddressSpaceConversion)
1315 Addr = CGF.Builder.CreateAddrSpaceCast(
1316 Addr, CGF.ConvertType(E->getType()), ElemTy);
1317 return Addr;
1318 }
1319 break;
1320
1321 // Array-to-pointer decay.
1322 case CK_ArrayToPointerDecay:
1323 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1324
1325 // Derived-to-base conversions.
1326 case CK_UncheckedDerivedToBase:
1327 case CK_DerivedToBase: {
1328 // TODO: Support accesses to members of base classes in TBAA. For now, we
1329 // conservatively pretend that the complete object is of the base class
1330 // type.
1331 if (TBAAInfo)
1332 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1334 CE->getSubExpr(), BaseInfo, nullptr,
1335 (KnownNonNull_t)(IsKnownNonNull ||
1336 CE->getCastKind() == CK_UncheckedDerivedToBase));
1337 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1338 return CGF.GetAddressOfBaseClass(
1339 Addr, Derived, CE->path_begin(), CE->path_end(),
1340 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1341 }
1342
1343 // TODO: Is there any reason to treat base-to-derived conversions
1344 // specially?
1345 default:
1346 break;
1347 }
1348 }
1349
1350 // Unary &.
1351 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1352 if (UO->getOpcode() == UO_AddrOf) {
1353 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1354 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1355 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1356 return LV.getAddress(CGF);
1357 }
1358 }
1359
1360 // std::addressof and variants.
1361 if (auto *Call = dyn_cast<CallExpr>(E)) {
1362 switch (Call->getBuiltinCallee()) {
1363 default:
1364 break;
1365 case Builtin::BIaddressof:
1366 case Builtin::BI__addressof:
1367 case Builtin::BI__builtin_addressof: {
1368 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1369 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1370 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1371 return LV.getAddress(CGF);
1372 }
1373 }
1374 }
1375
1376 // TODO: conditional operators, comma.
1377
1378 // Otherwise, use the alignment of the type.
1381 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1382}
1383
1384/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1385/// derive a more accurate bound on the alignment of the pointer.
1387 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1388 KnownNonNull_t IsKnownNonNull) {
1389 Address Addr =
1390 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1391 if (IsKnownNonNull && !Addr.isKnownNonNull())
1392 Addr.setKnownNonNull();
1393 return Addr;
1394}
1395
1397 llvm::Value *V = RV.getScalarVal();
1398 if (auto MPT = T->getAs<MemberPointerType>())
1399 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1400 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1401}
1402
1404 if (Ty->isVoidType())
1405 return RValue::get(nullptr);
1406
1407 switch (getEvaluationKind(Ty)) {
1408 case TEK_Complex: {
1409 llvm::Type *EltTy =
1411 llvm::Value *U = llvm::UndefValue::get(EltTy);
1412 return RValue::getComplex(std::make_pair(U, U));
1413 }
1414
1415 // If this is a use of an undefined aggregate type, the aggregate must have an
1416 // identifiable address. Just because the contents of the value are undefined
1417 // doesn't mean that the address can't be taken and compared.
1418 case TEK_Aggregate: {
1419 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1420 return RValue::getAggregate(DestPtr);
1421 }
1422
1423 case TEK_Scalar:
1424 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1425 }
1426 llvm_unreachable("bad evaluation kind");
1427}
1428
1430 const char *Name) {
1431 ErrorUnsupported(E, Name);
1432 return GetUndefRValue(E->getType());
1433}
1434
1436 const char *Name) {
1437 ErrorUnsupported(E, Name);
1438 llvm::Type *ElTy = ConvertType(E->getType());
1439 llvm::Type *Ty = UnqualPtrTy;
1440 return MakeAddrLValue(
1441 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1442}
1443
1444bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1445 const Expr *Base = Obj;
1446 while (!isa<CXXThisExpr>(Base)) {
1447 // The result of a dynamic_cast can be null.
1448 if (isa<CXXDynamicCastExpr>(Base))
1449 return false;
1450
1451 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1452 Base = CE->getSubExpr();
1453 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1454 Base = PE->getSubExpr();
1455 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1456 if (UO->getOpcode() == UO_Extension)
1457 Base = UO->getSubExpr();
1458 else
1459 return false;
1460 } else {
1461 return false;
1462 }
1463 }
1464 return true;
1465}
1466
1467LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1468 LValue LV;
1469 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1470 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1471 else
1472 LV = EmitLValue(E);
1473 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1474 SanitizerSet SkippedChecks;
1475 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1476 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1477 if (IsBaseCXXThis)
1478 SkippedChecks.set(SanitizerKind::Alignment, true);
1479 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1480 SkippedChecks.set(SanitizerKind::Null, true);
1481 }
1482 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1483 }
1484 return LV;
1485}
1486
1487/// EmitLValue - Emit code to compute a designator that specifies the location
1488/// of the expression.
1489///
1490/// This can return one of two things: a simple address or a bitfield reference.
1491/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1492/// an LLVM pointer type.
1493///
1494/// If this returns a bitfield reference, nothing about the pointee type of the
1495/// LLVM value is known: For example, it may not be a pointer to an integer.
1496///
1497/// If this returns a normal address, and if the lvalue's C type is fixed size,
1498/// this method guarantees that the returned pointer type will point to an LLVM
1499/// type of the same size as the lvalue's type. If the lvalue has a variable
1500/// length type, this is not possible.
1501///
1503 KnownNonNull_t IsKnownNonNull) {
1504 LValue LV = EmitLValueHelper(E, IsKnownNonNull);
1505 if (IsKnownNonNull && !LV.isKnownNonNull())
1506 LV.setKnownNonNull();
1507 return LV;
1508}
1509
1511 const ASTContext &Ctx) {
1512 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1513 if (isa<OpaqueValueExpr>(SE))
1514 return SE->getType();
1515 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1516}
1517
1518LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1519 KnownNonNull_t IsKnownNonNull) {
1520 ApplyDebugLocation DL(*this, E);
1521 switch (E->getStmtClass()) {
1522 default: return EmitUnsupportedLValue(E, "l-value expression");
1523
1524 case Expr::ObjCPropertyRefExprClass:
1525 llvm_unreachable("cannot emit a property reference directly");
1526
1527 case Expr::ObjCSelectorExprClass:
1528 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1529 case Expr::ObjCIsaExprClass:
1530 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1531 case Expr::BinaryOperatorClass:
1532 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1533 case Expr::CompoundAssignOperatorClass: {
1534 QualType Ty = E->getType();
1535 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1536 Ty = AT->getValueType();
1537 if (!Ty->isAnyComplexType())
1538 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1539 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1540 }
1541 case Expr::CallExprClass:
1542 case Expr::CXXMemberCallExprClass:
1543 case Expr::CXXOperatorCallExprClass:
1544 case Expr::UserDefinedLiteralClass:
1545 return EmitCallExprLValue(cast<CallExpr>(E));
1546 case Expr::CXXRewrittenBinaryOperatorClass:
1547 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1548 IsKnownNonNull);
1549 case Expr::VAArgExprClass:
1550 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1551 case Expr::DeclRefExprClass:
1552 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1553 case Expr::ConstantExprClass: {
1554 const ConstantExpr *CE = cast<ConstantExpr>(E);
1555 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1557 return MakeNaturalAlignAddrLValue(Result, RetType);
1558 }
1559 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1560 }
1561 case Expr::ParenExprClass:
1562 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1563 case Expr::GenericSelectionExprClass:
1564 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1565 IsKnownNonNull);
1566 case Expr::PredefinedExprClass:
1567 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1568 case Expr::StringLiteralClass:
1569 return EmitStringLiteralLValue(cast<StringLiteral>(E));
1570 case Expr::ObjCEncodeExprClass:
1571 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1572 case Expr::PseudoObjectExprClass:
1573 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1574 case Expr::InitListExprClass:
1575 return EmitInitListLValue(cast<InitListExpr>(E));
1576 case Expr::CXXTemporaryObjectExprClass:
1577 case Expr::CXXConstructExprClass:
1578 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1579 case Expr::CXXBindTemporaryExprClass:
1580 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1581 case Expr::CXXUuidofExprClass:
1582 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1583 case Expr::LambdaExprClass:
1584 return EmitAggExprToLValue(E);
1585
1586 case Expr::ExprWithCleanupsClass: {
1587 const auto *cleanups = cast<ExprWithCleanups>(E);
1588 RunCleanupsScope Scope(*this);
1589 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1590 if (LV.isSimple()) {
1591 // Defend against branches out of gnu statement expressions surrounded by
1592 // cleanups.
1593 Address Addr = LV.getAddress(*this);
1594 llvm::Value *V = Addr.getBasePointer();
1595 Scope.ForceCleanup({&V});
1596 Addr.replaceBasePointer(V);
1597 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1598 LV.getBaseInfo(), LV.getTBAAInfo());
1599 }
1600 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1601 // bitfield lvalue or some other non-simple lvalue?
1602 return LV;
1603 }
1604
1605 case Expr::CXXDefaultArgExprClass: {
1606 auto *DAE = cast<CXXDefaultArgExpr>(E);
1607 CXXDefaultArgExprScope Scope(*this, DAE);
1608 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1609 }
1610 case Expr::CXXDefaultInitExprClass: {
1611 auto *DIE = cast<CXXDefaultInitExpr>(E);
1612 CXXDefaultInitExprScope Scope(*this, DIE);
1613 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1614 }
1615 case Expr::CXXTypeidExprClass:
1616 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1617
1618 case Expr::ObjCMessageExprClass:
1619 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1620 case Expr::ObjCIvarRefExprClass:
1621 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1622 case Expr::StmtExprClass:
1623 return EmitStmtExprLValue(cast<StmtExpr>(E));
1624 case Expr::UnaryOperatorClass:
1625 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1626 case Expr::ArraySubscriptExprClass:
1627 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1628 case Expr::MatrixSubscriptExprClass:
1629 return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1630 case Expr::ArraySectionExprClass:
1631 return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1632 case Expr::ExtVectorElementExprClass:
1633 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1634 case Expr::CXXThisExprClass:
1636 case Expr::MemberExprClass:
1637 return EmitMemberExpr(cast<MemberExpr>(E));
1638 case Expr::CompoundLiteralExprClass:
1639 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1640 case Expr::ConditionalOperatorClass:
1641 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1642 case Expr::BinaryConditionalOperatorClass:
1643 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1644 case Expr::ChooseExprClass:
1645 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1646 case Expr::OpaqueValueExprClass:
1647 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1648 case Expr::SubstNonTypeTemplateParmExprClass:
1649 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1650 IsKnownNonNull);
1651 case Expr::ImplicitCastExprClass:
1652 case Expr::CStyleCastExprClass:
1653 case Expr::CXXFunctionalCastExprClass:
1654 case Expr::CXXStaticCastExprClass:
1655 case Expr::CXXDynamicCastExprClass:
1656 case Expr::CXXReinterpretCastExprClass:
1657 case Expr::CXXConstCastExprClass:
1658 case Expr::CXXAddrspaceCastExprClass:
1659 case Expr::ObjCBridgedCastExprClass:
1660 return EmitCastLValue(cast<CastExpr>(E));
1661
1662 case Expr::MaterializeTemporaryExprClass:
1663 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1664
1665 case Expr::CoawaitExprClass:
1666 return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1667 case Expr::CoyieldExprClass:
1668 return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1669 case Expr::PackIndexingExprClass:
1670 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1671 }
1672}
1673
1674/// Given an object of the given canonical type, can we safely copy a
1675/// value out of it based on its initializer?
1677 assert(type.isCanonical());
1678 assert(!type->isReferenceType());
1679
1680 // Must be const-qualified but non-volatile.
1681 Qualifiers qs = type.getLocalQualifiers();
1682 if (!qs.hasConst() || qs.hasVolatile()) return false;
1683
1684 // Otherwise, all object types satisfy this except C++ classes with
1685 // mutable subobjects or non-trivial copy/destroy behavior.
1686 if (const auto *RT = dyn_cast<RecordType>(type))
1687 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1688 if (RD->hasMutableFields() || !RD->isTrivial())
1689 return false;
1690
1691 return true;
1692}
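// For illustration, a rough sketch of which types pass the checks above
// (exact answers depend on the record definitions involved):
//
//   const int N = 42;            // OK: const, non-volatile, trivial
//   const float F = 1.0f;        // OK
//   volatile const int V = 1;    // rejected: volatile
//   struct S { mutable int x; };
//   const S s{};                 // rejected: mutable subobject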
1693
1694/// Can we constant-emit a load of a reference to a variable of the
1695/// given type? This is different from predicates like
1696/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1697/// in situations that don't necessarily satisfy the language's rules
1698 /// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1699/// to do this with const float variables even if those variables
1700/// aren't marked 'constexpr'.
1708 type = type.getCanonicalType();
1709 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1710 if (isConstantEmittableObjectType(ref->getPointeeType()))
1712 return CEK_AsReferenceOnly;
1713 }
1715 return CEK_AsValueOnly;
1716 return CEK_None;
1717}
1718
1719/// Try to emit a reference to the given value without producing it as
1720/// an l-value. This is just an optimization, but it avoids us needing
1721/// to emit global copies of variables if they're named without triggering
1722/// a formal use in a context where we can't emit a direct reference to them,
1723/// for instance if a block or lambda or a member of a local class uses a
1724/// const int variable or constexpr variable from an enclosing function.
1725CodeGenFunction::ConstantEmission
1727 ValueDecl *value = refExpr->getDecl();
1728
1729 // The value needs to be an enum constant or a constant variable.
1731 if (isa<ParmVarDecl>(value)) {
1732 CEK = CEK_None;
1733 } else if (auto *var = dyn_cast<VarDecl>(value)) {
1734 CEK = checkVarTypeForConstantEmission(var->getType());
1735 } else if (isa<EnumConstantDecl>(value)) {
1736 CEK = CEK_AsValueOnly;
1737 } else {
1738 CEK = CEK_None;
1739 }
1740 if (CEK == CEK_None) return ConstantEmission();
1741
1742 Expr::EvalResult result;
1743 bool resultIsReference;
1744 QualType resultType;
1745
1746 // It's best to evaluate all the way as an r-value if that's permitted.
1747 if (CEK != CEK_AsReferenceOnly &&
1748 refExpr->EvaluateAsRValue(result, getContext())) {
1749 resultIsReference = false;
1750 resultType = refExpr->getType();
1751
1752 // Otherwise, try to evaluate as an l-value.
1753 } else if (CEK != CEK_AsValueOnly &&
1754 refExpr->EvaluateAsLValue(result, getContext())) {
1755 resultIsReference = true;
1756 resultType = value->getType();
1757
1758 // Failure.
1759 } else {
1760 return ConstantEmission();
1761 }
1762
1763 // In any case, if the initializer has side-effects, abandon ship.
1764 if (result.HasSideEffects)
1765 return ConstantEmission();
1766
1767 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1768 // referencing a global host variable by copy. In this case the lambda should
1769 // make a copy of the value of the global host variable. The DRE of the
1770 // captured reference variable cannot be emitted as a load from the host
1771 // global variable as a compile-time constant, since the host variable is not
1772 // accessible on the device. The DRE of the captured reference variable has to
1773 // be loaded from the captures instead.
1774 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1776 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1777 if (MD && MD->getParent()->isLambda() &&
1778 MD->getOverloadedOperator() == OO_Call) {
1779 const APValue::LValueBase &base = result.Val.getLValueBase();
1780 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1781 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1782 if (!VD->hasAttr<CUDADeviceAttr>()) {
1783 return ConstantEmission();
1784 }
1785 }
1786 }
1787 }
1788 }
1789
1790 // Emit as a constant.
1791 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1792 result.Val, resultType);
1793
1794 // Make sure we emit a debug reference to the global variable.
1795 // This should probably fire even for
1796 if (isa<VarDecl>(value)) {
1797 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1798 EmitDeclRefExprDbgValue(refExpr, result.Val);
1799 } else {
1800 assert(isa<EnumConstantDecl>(value));
1801 EmitDeclRefExprDbgValue(refExpr, result.Val);
1802 }
1803
1804 // If we emitted a reference constant, we need to dereference that.
1805 if (resultIsReference)
1807
1809}
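// As a concrete (illustrative) example of the case described above, the use
// of 'n' below is not an odr-use, so the reference can be folded to the
// constant 42 instead of requiring a capture or a global copy of 'n':
//
//   void f() {
//     const int n = 42;
//     auto g = [] { return n; };   // emitted as the constant 42
//   }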
1810
1812 const MemberExpr *ME) {
1813 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1814 // Try to emit static variable member expressions as DREs.
1815 return DeclRefExpr::Create(
1817 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1818 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1819 }
1820 return nullptr;
1821}
1822
1823CodeGenFunction::ConstantEmission
1826 return tryEmitAsConstant(DRE);
1827 return ConstantEmission();
1828}
1829
1831 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1832 assert(Constant && "not a constant");
1833 if (Constant.isReference())
1834 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1835 E->getExprLoc())
1836 .getScalarVal();
1837 return Constant.getValue();
1838}
1839
1840llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1842 return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
1843 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1844 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1845}
1846
1848 if (Ty->isBooleanType())
1849 return true;
1850
1851 if (const EnumType *ET = Ty->getAs<EnumType>())
1852 return ET->getDecl()->getIntegerType()->isBooleanType();
1853
1854 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1855 return hasBooleanRepresentation(AT->getValueType());
1856
1857 return false;
1858}
1859
1861 llvm::APInt &Min, llvm::APInt &End,
1862 bool StrictEnums, bool IsBool) {
1863 const EnumType *ET = Ty->getAs<EnumType>();
1864 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1865 ET && !ET->getDecl()->isFixed();
1866 if (!IsBool && !IsRegularCPlusPlusEnum)
1867 return false;
1868
1869 if (IsBool) {
1870 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1871 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1872 } else {
1873 const EnumDecl *ED = ET->getDecl();
1874 ED->getValueRange(End, Min);
1875 }
1876 return true;
1877}
1878
1879llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1880 llvm::APInt Min, End;
1881 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1883 return nullptr;
1884
1885 llvm::MDBuilder MDHelper(getLLVMContext());
1886 return MDHelper.createRange(Min, End);
1887}
1888
1891 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1892 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1893 if (!HasBoolCheck && !HasEnumCheck)
1894 return false;
1895
1896 bool IsBool = hasBooleanRepresentation(Ty) ||
1898 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1899 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1900 if (!NeedsBoolCheck && !NeedsEnumCheck)
1901 return false;
1902
1903 // Single-bit booleans don't need to be checked. Special-case this to avoid
1904 // a bit width mismatch when handling bitfield values. This is handled by
1905 // EmitFromMemory for the non-bitfield case.
1906 if (IsBool &&
1907 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1908 return false;
1909
1910 llvm::APInt Min, End;
1911 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1912 return true;
1913
1914 auto &Ctx = getLLVMContext();
1915 SanitizerScope SanScope(this);
1916 llvm::Value *Check;
1917 --End;
1918 if (!Min) {
1919 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1920 } else {
1921 llvm::Value *Upper =
1922 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1923 llvm::Value *Lower =
1924 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1925 Check = Builder.CreateAnd(Upper, Lower);
1926 }
1927 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1930 NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1931 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1932 StaticArgs, EmitCheckValue(Value));
1933 return true;
1934}
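// For example, with -fsanitize=bool a loaded 'bool' whose in-memory value is
// neither 0 nor 1 reaches the LoadInvalidValue handler, and with
// -fsanitize=enum the loaded value of an unfixed C++ enum is checked against
// its enumerator value range (a sketch; the exact bounds come from
// EnumDecl::getValueRange):
//
//   enum E { A = 0, B = 3 };   // checked range is roughly [0, 4)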
1935
1936llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1937 QualType Ty,
1939 LValueBaseInfo BaseInfo,
1940 TBAAAccessInfo TBAAInfo,
1941 bool isNontemporal) {
1942 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1943 if (GV->isThreadLocal())
1944 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1946
1947 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1948 // Boolean vectors use `iN` as storage type.
1949 if (ClangVecTy->isExtVectorBoolType()) {
1950 llvm::Type *ValTy = ConvertType(Ty);
1951 unsigned ValNumElems =
1952 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1953 // Load the `iP` storage object (P is the padded vector size).
1954 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1955 const auto *RawIntTy = RawIntV->getType();
1956 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1957 // Bitcast iP --> <P x i1>.
1958 auto *PaddedVecTy = llvm::FixedVectorType::get(
1959 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1960 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1961 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1962 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
1963
1964 return EmitFromMemory(V, Ty);
1965 }
1966
1967 // Handle vectors of size 3 like size 4 for better performance.
1968 const llvm::Type *EltTy = Addr.getElementType();
1969 const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1970
1971 if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
1972
1973 llvm::VectorType *vec4Ty =
1974 llvm::FixedVectorType::get(VTy->getElementType(), 4);
1975 Address Cast = Addr.withElementType(vec4Ty);
1976 // Now load value.
1977 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1978
1979 // Shuffle vector to get vec3.
1980 V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
1981 return EmitFromMemory(V, Ty);
1982 }
1983 }
1984
1985 // Atomic operations have to be done on integral types.
1986 LValue AtomicLValue =
1987 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1988 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1989 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1990 }
1991
1992 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1993 if (isNontemporal) {
1994 llvm::MDNode *Node = llvm::MDNode::get(
1995 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1996 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
1997 }
1998
1999 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2000
2001 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2002 // In order to prevent the optimizer from throwing away the check, don't
2003 // attach range metadata to the load.
2004 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2005 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2006 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2007 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2008 llvm::MDNode::get(getLLVMContext(), std::nullopt));
2009 }
2010
2011 return EmitFromMemory(Load, Ty);
2012}
2013
2014llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2015 // Bool has a different representation in memory than in registers.
2016 if (hasBooleanRepresentation(Ty)) {
2017 // This should really always be an i1, but sometimes it's already
2018 // an i8, and it's awkward to track those cases down.
2019 if (Value->getType()->isIntegerTy(1))
2020 return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
2021 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
2022 "wrong value rep of bool");
2023 }
2024
2025 return Value;
2026}
2027
2028llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2029 // Bool has a different representation in memory than in registers.
2030 if (hasBooleanRepresentation(Ty)) {
2031 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
2032 "wrong value rep of bool");
2033 return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
2034 }
2035 if (Ty->isExtVectorBoolType()) {
2036 const auto *RawIntTy = Value->getType();
2037 // Bitcast iP --> <P x i1>.
2038 auto *PaddedVecTy = llvm::FixedVectorType::get(
2039 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2040 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2041 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2042 llvm::Type *ValTy = ConvertType(Ty);
2043 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2044 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2045 }
2046
2047 return Value;
2048}
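// A minimal sketch of the round trip these two helpers implement, assuming
// the usual i8 in-memory representation of 'bool':
//
//   bool flag = true;   // EmitToMemory: zext i1 -> i8, stored as 1
//   bool copy = flag;   // load i8, then EmitFromMemory: trunc i8 -> i1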
2049
2050// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2051 // MatrixType), if it points to an array (the memory type of MatrixType).
2053 CodeGenFunction &CGF,
2054 bool IsVector = true) {
2055 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2056 if (ArrayTy && IsVector) {
2057 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2058 ArrayTy->getNumElements());
2059
2060 return Addr.withElementType(VectorTy);
2061 }
2062 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2063 if (VectorTy && !IsVector) {
2064 auto *ArrayTy = llvm::ArrayType::get(
2065 VectorTy->getElementType(),
2066 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2067
2068 return Addr.withElementType(ArrayTy);
2069 }
2070
2071 return Addr;
2072}
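// For example (illustrative only), a 2x2 matrix of float is stored in memory
// as [4 x float] but manipulated as a <4 x float> value, so loads and stores
// may need the element-type switch performed above:
//
//   typedef float m2x2_t __attribute__((matrix_type(2, 2)));
//   m2x2_t m;   // memory type: [4 x float]; value type: <4 x float>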
2073
2074// Emit a store of a matrix LValue. This may require casting the original
2075// pointer to memory address (ArrayType) to a pointer to the value type
2076// (VectorType).
2077static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2078 bool isInit, CodeGenFunction &CGF) {
2079 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
2080 value->getType()->isVectorTy());
2081 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2082 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2083 lvalue.isNontemporal());
2084}
2085
2086void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2087 bool Volatile, QualType Ty,
2088 LValueBaseInfo BaseInfo,
2089 TBAAAccessInfo TBAAInfo,
2090 bool isInit, bool isNontemporal) {
2091 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2092 if (GV->isThreadLocal())
2093 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2095
2096 llvm::Type *SrcTy = Value->getType();
2097 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2098 auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
2099 if (VecTy && ClangVecTy->isExtVectorBoolType()) {
2100 auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
2101 // Expand to the memory bit width.
2102 unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
2103 // <N x i1> --> <P x i1>.
2104 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2105 // <P x i1> --> iP.
2106 Value = Builder.CreateBitCast(Value, MemIntTy);
2107 } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
2108 // Handle vec3 specially.
2109 if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2110 // Our source is a vec3, do a shuffle vector to make it a vec4.
2111 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2112 "extractVec");
2113 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2114 }
2115 if (Addr.getElementType() != SrcTy) {
2116 Addr = Addr.withElementType(SrcTy);
2117 }
2118 }
2119 }
2120
2121 Value = EmitToMemory(Value, Ty);
2122
2123 LValue AtomicLValue =
2124 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2125 if (Ty->isAtomicType() ||
2126 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2127 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2128 return;
2129 }
2130
2131 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2132 if (isNontemporal) {
2133 llvm::MDNode *Node =
2134 llvm::MDNode::get(Store->getContext(),
2135 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2136 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2137 }
2138
2139 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2140}
2141
2142void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2143 bool isInit) {
2144 if (lvalue.getType()->isConstantMatrixType()) {
2145 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2146 return;
2147 }
2148
2149 EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
2150 lvalue.getType(), lvalue.getBaseInfo(),
2151 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2152}
2153
2154// Emit a load of a LValue of matrix type. This may require casting the pointer
2155// to memory address (ArrayType) to a pointer to the value type (VectorType).
2157 CodeGenFunction &CGF) {
2158 assert(LV.getType()->isConstantMatrixType());
2159 Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
2160 LV.setAddress(Addr);
2161 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2162}
2163
2164/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2165/// method emits the address of the lvalue, then loads the result as an rvalue,
2166/// returning the rvalue.
2168 if (LV.isObjCWeak()) {
2169 // load of a __weak object.
2170 Address AddrWeakObj = LV.getAddress(*this);
2172 AddrWeakObj));
2173 }
2175 // In MRC mode, we do a load+autorelease.
2176 if (!getLangOpts().ObjCAutoRefCount) {
2177 return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
2178 }
2179
2180 // In ARC mode, we load retained and then consume the value.
2181 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
2182 Object = EmitObjCConsumeObject(LV.getType(), Object);
2183 return RValue::get(Object);
2184 }
2185
2186 if (LV.isSimple()) {
2187 assert(!LV.getType()->isFunctionType());
2188
2189 if (LV.getType()->isConstantMatrixType())
2190 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2191
2192 // Everything needs a load.
2193 return RValue::get(EmitLoadOfScalar(LV, Loc));
2194 }
2195
2196 if (LV.isVectorElt()) {
2197 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2198 LV.isVolatileQualified());
2199 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2200 "vecext"));
2201 }
2202
2203 // If this is a reference to a subset of the elements of a vector, either
2204 // shuffle the input or extract/insert them as appropriate.
2205 if (LV.isExtVectorElt()) {
2207 }
2208
2209 // Global register variables always invoke intrinsics.
2210 if (LV.isGlobalReg())
2211 return EmitLoadOfGlobalRegLValue(LV);
2212
2213 if (LV.isMatrixElt()) {
2214 llvm::Value *Idx = LV.getMatrixIdx();
2215 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2216 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2217 llvm::MatrixBuilder MB(Builder);
2218 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2219 }
2220 llvm::LoadInst *Load =
2222 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2223 }
2224
2225 assert(LV.isBitField() && "Unknown LValue type!");
2226 return EmitLoadOfBitfieldLValue(LV, Loc);
2227}
2228
2231 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2232
2233 // Get the output type.
2234 llvm::Type *ResLTy = ConvertType(LV.getType());
2235
2236 Address Ptr = LV.getBitFieldAddress();
2237 llvm::Value *Val =
2238 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2239
2240 bool UseVolatile = LV.isVolatileQualified() &&
2241 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2242 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2243 const unsigned StorageSize =
2244 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2245 if (Info.IsSigned) {
2246 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2247 unsigned HighBits = StorageSize - Offset - Info.Size;
2248 if (HighBits)
2249 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2250 if (Offset + HighBits)
2251 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2252 } else {
2253 if (Offset)
2254 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2255 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2256 Val = Builder.CreateAnd(
2257 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2258 }
2259 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2260 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2261 return RValue::get(Val);
2262}
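// A worked example of the shift/mask arithmetic above, assuming the usual
// layout with a 32-bit storage unit (illustrative only):
//
//   struct S { int pad : 8; int f : 5; };  // Offset = 8, Size = 5, StorageSize = 32
//   // Signed load of 'f':   shl by HighBits = 32 - 8 - 5 = 19, then
//   //                       ashr by Offset + HighBits = 27 (sign-extends).
//   // Unsigned equivalent:  lshr by 8, then mask with the low 5 bits.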
2263
2264// If this is a reference to a subset of the elements of a vector, create an
2265// appropriate shufflevector.
2267 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2268 LV.isVolatileQualified());
2269
2270 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2271 // IR value to a vector here allows the rest of codegen to behave as normal.
2272 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2273 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2274 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2275 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2276 }
2277
2278 const llvm::Constant *Elts = LV.getExtVectorElts();
2279
2280 // If the result of the expression is a non-vector type, we must be extracting
2281 // a single element. Just codegen as an extractelement.
2282 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2283 if (!ExprVT) {
2284 unsigned InIdx = getAccessedFieldNo(0, Elts);
2285 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2286 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2287 }
2288
2289 // Always use shuffle vector to try to retain the original program structure
2290 unsigned NumResultElts = ExprVT->getNumElements();
2291
2293 for (unsigned i = 0; i != NumResultElts; ++i)
2294 Mask.push_back(getAccessedFieldNo(i, Elts));
2295
2296 Vec = Builder.CreateShuffleVector(Vec, Mask);
2297 return RValue::get(Vec);
2298}
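// For example (illustrative only), given ext_vector swizzles:
//
//   typedef float float2 __attribute__((ext_vector_type(2)));
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 v;
//   float2 lo = v.xz;   // vector result: shufflevector with mask {0, 2}
//   float  x  = v.x;    // scalar result: extractelement at index 0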
2299
2300 /// Generates an lvalue for a partial ext_vector access.
2302 Address VectorAddress = LV.getExtVectorAddress();
2303 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2304 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2305
2306 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2307
2308 const llvm::Constant *Elts = LV.getExtVectorElts();
2309 unsigned ix = getAccessedFieldNo(0, Elts);
2310
2311 Address VectorBasePtrPlusIx =
2312 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2313 "vector.elt");
2314
2315 return VectorBasePtrPlusIx;
2316}
2317
2318 /// Loads of global named registers are always calls to intrinsics.
2320 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2321 "Bad type for register variable");
2322 llvm::MDNode *RegName = cast<llvm::MDNode>(
2323 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2324
2325 // We accept integer and pointer types only
2326 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2327 llvm::Type *Ty = OrigTy;
2328 if (OrigTy->isPointerTy())
2329 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2330 llvm::Type *Types[] = { Ty };
2331
2332 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2333 llvm::Value *Call = Builder.CreateCall(
2334 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2335 if (OrigTy->isPointerTy())
2336 Call = Builder.CreateIntToPtr(Call, OrigTy);
2337 return RValue::get(Call);
2338}
2339
2340/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2341 /// lvalue, where both are guaranteed to have the same type, and that type
2342/// is 'Ty'.
2344 bool isInit) {
2345 if (!Dst.isSimple()) {
2346 if (Dst.isVectorElt()) {
2347 // Read/modify/write the vector, inserting the new element.
2348 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2349 Dst.isVolatileQualified());
2350 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2351 if (IRStoreTy) {
2352 auto *IRVecTy = llvm::FixedVectorType::get(
2353 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2354 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2355 // iN --> <N x i1>.
2356 }
2357 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2358 Dst.getVectorIdx(), "vecins");
2359 if (IRStoreTy) {
2360 // <N x i1> --> <iN>.
2361 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2362 }
2364 Dst.isVolatileQualified());
2365 return;
2366 }
2367
2368 // If this is an update of extended vector elements, insert them as
2369 // appropriate.
2370 if (Dst.isExtVectorElt())
2372
2373 if (Dst.isGlobalReg())
2374 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2375
2376 if (Dst.isMatrixElt()) {
2377 llvm::Value *Idx = Dst.getMatrixIdx();
2378 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2379 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2380 llvm::MatrixBuilder MB(Builder);
2381 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2382 }
2383 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2384 llvm::Value *Vec =
2385 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2387 Dst.isVolatileQualified());
2388 return;
2389 }
2390
2391 assert(Dst.isBitField() && "Unknown LValue type");
2392 return EmitStoreThroughBitfieldLValue(Src, Dst);
2393 }
2394
2395 // There's special magic for assigning into an ARC-qualified l-value.
2396 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2397 switch (Lifetime) {
2399 llvm_unreachable("present but none");
2400
2402 // nothing special
2403 break;
2404
2406 if (isInit) {
2407 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2408 break;
2409 }
2410 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2411 return;
2412
2414 if (isInit)
2415 // Initialize and then skip the primitive store.
2416 EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
2417 else
2418 EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
2419 /*ignore*/ true);
2420 return;
2421
2424 Src.getScalarVal()));
2425 // fall into the normal path
2426 break;
2427 }
2428 }
2429
2430 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2431 // Store into a __weak object.
2432 Address LvalueDst = Dst.getAddress(*this);
2433 llvm::Value *src = Src.getScalarVal();
2434 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2435 return;
2436 }
2437
2438 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2439 // Store into a __strong object.
2440 Address LvalueDst = Dst.getAddress(*this);
2441 llvm::Value *src = Src.getScalarVal();
2442 if (Dst.isObjCIvar()) {
2443 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2444 llvm::Type *ResultType = IntPtrTy;
2446 llvm::Value *RHS = dst.emitRawPointer(*this);
2447 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2448 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2449 ResultType, "sub.ptr.lhs.cast");
2450 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2451 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2452 } else if (Dst.isGlobalObjCRef()) {
2453 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2454 Dst.isThreadLocalRef());
2455 }
2456 else
2457 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2458 return;
2459 }
2460
2461 assert(Src.isScalar() && "Can't emit an agg store with this method");
2462 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2463}
2464
2466 llvm::Value **Result) {
2467 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2468 llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
2469 Address Ptr = Dst.getBitFieldAddress();
2470
2471 // Get the source value, truncated to the width of the bit-field.
2472 llvm::Value *SrcVal = Src.getScalarVal();
2473
2474 // Cast the source to the storage type and shift it into place.
2475 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2476 /*isSigned=*/false);
2477 llvm::Value *MaskedVal = SrcVal;
2478
2479 const bool UseVolatile =
2480 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2481 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2482 const unsigned StorageSize =
2483 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2484 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2485 // See if there are other bits in the bitfield's storage we'll need to load
2486 // and mask together with source before storing.
2487 if (StorageSize != Info.Size) {
2488 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2489 llvm::Value *Val =
2490 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2491
2492 // Mask the source value as needed.
2494 SrcVal = Builder.CreateAnd(
2495 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2496 "bf.value");
2497 MaskedVal = SrcVal;
2498 if (Offset)
2499 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2500
2501 // Mask out the original value.
2502 Val = Builder.CreateAnd(
2503 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2504 "bf.clear");
2505
2506 // Or together the unchanged values and the source value.
2507 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2508 } else {
2509 assert(Offset == 0);
2510 // According to the AAPCS:
2511 // When a volatile bit-field is written, and its container does not overlap
2512 // with any non-bit-field member, its container must be read exactly once
2513 // and written exactly once using the access width appropriate to the type
2514 // of the container. The two accesses are not atomic.
2515 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2516 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2517 Builder.CreateLoad(Ptr, true, "bf.load");
2518 }
2519
2520 // Write the new value back out.
2521 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2522
2523 // Return the new value of the bit-field, if requested.
2524 if (Result) {
2525 llvm::Value *ResultVal = MaskedVal;
2526
2527 // Sign extend the value if needed.
2528 if (Info.IsSigned) {
2529 assert(Info.Size <= StorageSize);
2530 unsigned HighBits = StorageSize - Info.Size;
2531 if (HighBits) {
2532 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2533 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2534 }
2535 }
2536
2537 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2538 "bf.result.cast");
2539 *Result = EmitFromMemory(ResultVal, Dst.getType());
2540 }
2541}
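// A worked example of the store path above for the same kind of layout,
// assuming a 32-bit storage unit (illustrative only):
//
//   struct S { int pad : 8; int f : 5; };  // Offset = 8, Size = 5, StorageSize = 32
//   // bf.value: SrcVal & 0x1F         (truncate to the field width)
//   // bf.shl:   SrcVal << 8           (shift into place)
//   // bf.clear: Val & ~0x00001F00u    (clear the field's bits in storage)
//   // bf.set:   Val | SrcVal          (merge and store back)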
2542
2544 LValue Dst) {
2545 // HLSL allows storing to scalar values through ExtVector component LValues.
2546 // To support this we need to handle the case where the destination address is
2547 // a scalar.
2548 Address DstAddr = Dst.getExtVectorAddress();
2549 if (!DstAddr.getElementType()->isVectorTy()) {
2550 assert(!Dst.getType()->isVectorType() &&
2551 "this should only occur for non-vector l-values");
2552 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2553 return;
2554 }
2555
2556 // This access turns into a read/modify/write of the vector. Load the input
2557 // value now.
2558 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2559 const llvm::Constant *Elts = Dst.getExtVectorElts();
2560
2561 llvm::Value *SrcVal = Src.getScalarVal();
2562
2563 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2564 unsigned NumSrcElts = VTy->getNumElements();
2565 unsigned NumDstElts =
2566 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2567 if (NumDstElts == NumSrcElts) {
2568 // Use a shuffle vector when the src and destination have the same number
2569 // of elements, and restore the vector mask since that is the order in
2570 // which the value will be stored.
2571 SmallVector<int, 4> Mask(NumDstElts);
2572 for (unsigned i = 0; i != NumSrcElts; ++i)
2573 Mask[getAccessedFieldNo(i, Elts)] = i;
2574
2575 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2576 } else if (NumDstElts > NumSrcElts) {
2577 // Extend the source vector to the same length and then shuffle it
2578 // into the destination.
2579 // FIXME: since we're shuffling with undef, can we just use the indices
2580 // into that? This could be simpler.
2581 SmallVector<int, 4> ExtMask;
2582 for (unsigned i = 0; i != NumSrcElts; ++i)
2583 ExtMask.push_back(i);
2584 ExtMask.resize(NumDstElts, -1);
2585 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2586 // Build the identity mask.
2588 for (unsigned i = 0; i != NumDstElts; ++i)
2589 Mask.push_back(i);
2590
2591 // When the vector size is odd and .odd or .hi is used, the last element
2592 // of the Elts constant array will be one past the size of the vector.
2593 // Ignore the last element here, if it is greater than the mask size.
2594 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2595 NumSrcElts--;
2596
2597 // Modify the mask for the elements that get shuffled in.
2598 for (unsigned i = 0; i != NumSrcElts; ++i)
2599 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2600 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2601 } else {
2602 // We should never shorten the vector
2603 llvm_unreachable("unexpected shorten vector length");
2604 }
2605 } else {
2606 // If the Src is a scalar (not a vector) and the target is a vector, it must
2607 // be updating one element.
2608 unsigned InIdx = getAccessedFieldNo(0, Elts);
2609 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2610 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2611 }
2612
2614 Dst.isVolatileQualified());
2615}
2616
2617 /// Stores to global named registers are always calls to intrinsics.
2619 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2620 "Bad type for register variable");
2621 llvm::MDNode *RegName = cast<llvm::MDNode>(
2622 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2623 assert(RegName && "Register LValue is not metadata");
2624
2625 // We accept integer and pointer types only
2626 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2627 llvm::Type *Ty = OrigTy;
2628 if (OrigTy->isPointerTy())
2629 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2630 llvm::Type *Types[] = { Ty };
2631
2632 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2633 llvm::Value *Value = Src.getScalarVal();
2634 if (OrigTy->isPointerTy())
2635 Value = Builder.CreatePtrToInt(Value, Ty);
2636 Builder.CreateCall(
2637 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2638}
2639
2640 // setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
2641 // generating the write-barrier API. It is currently a global, an ivar,
2642 // or neither.
2643static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2644 LValue &LV,
2645 bool IsMemberAccess=false) {
2646 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2647 return;
2648
2649 if (isa<ObjCIvarRefExpr>(E)) {
2650 QualType ExpTy = E->getType();
2651 if (IsMemberAccess && ExpTy->isPointerType()) {
2652 // If the ivar is a structure pointer, assigning to a field of
2653 // this struct follows gcc's behavior and conservatively makes it a
2654 // non-ivar write-barrier.
2655 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2656 if (ExpTy->isRecordType()) {
2657 LV.setObjCIvar(false);
2658 return;
2659 }
2660 }
2661 LV.setObjCIvar(true);
2662 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2663 LV.setBaseIvarExp(Exp->getBase());
2664 LV.setObjCArray(E->getType()->isArrayType());
2665 return;
2666 }
2667
2668 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2669 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2670 if (VD->hasGlobalStorage()) {
2671 LV.setGlobalObjCRef(true);
2672 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2673 }
2674 }
2675 LV.setObjCArray(E->getType()->isArrayType());
2676 return;
2677 }
2678
2679 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2680 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2681 return;
2682 }
2683
2684 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2685 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2686 if (LV.isObjCIvar()) {
2687 // If cast is to a structure pointer, follow gcc's behavior and make it
2688 // a non-ivar write-barrier.
2689 QualType ExpTy = E->getType();
2690 if (ExpTy->isPointerType())
2691 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2692 if (ExpTy->isRecordType())
2693 LV.setObjCIvar(false);
2694 }
2695 return;
2696 }
2697
2698 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2699 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2700 return;
2701 }
2702
2703 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2704 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2705 return;
2706 }
2707
2708 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2709 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2710 return;
2711 }
2712
2713 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2714 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2715 return;
2716 }
2717
2718 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2719 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2720 if (LV.isObjCIvar() && !LV.isObjCArray())
2721 // Using array syntax to assign to what an ivar points to is not the
2722 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2723 LV.setObjCIvar(false);
2724 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2725 // Using array syntax to assign to what a global points to is not the
2726 // same as assigning to the global itself. {id *G;} G[i] = 0;
2727 LV.setGlobalObjCRef(false);
2728 return;
2729 }
2730
2731 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2732 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2733 // We don't know if member is an 'ivar', but this flag is looked at
2734 // only in the context of LV.isObjCIvar().
2735 LV.setObjCArray(E->getType()->isArrayType());
2736 return;
2737 }
2738}
2739
2741 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2742 llvm::Type *RealVarTy, SourceLocation Loc) {
2743 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2745 CGF, VD, Addr, Loc);
2746 else
2747 Addr =
2748 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2749
2750 Addr = Addr.withElementType(RealVarTy);
2751 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2752}
2753
2755 const VarDecl *VD, QualType T) {
2756 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2757 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2758 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2759 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2760 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2761 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2762 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2764 return Address::invalid();
2765 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2766 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2767 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2769 "Expected link clause OR to clause with unified memory enabled.");
2770 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2772 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2773}
2774
2775Address
2777 LValueBaseInfo *PointeeBaseInfo,
2778 TBAAAccessInfo *PointeeTBAAInfo) {
2779 llvm::LoadInst *Load =
2780 Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
2782 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2783 CharUnits(), /*ForPointeeType=*/true,
2784 PointeeBaseInfo, PointeeTBAAInfo);
2785}
2786
2788 LValueBaseInfo PointeeBaseInfo;
2789 TBAAAccessInfo PointeeTBAAInfo;
2790 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2791 &PointeeTBAAInfo);
2792 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2793 PointeeBaseInfo, PointeeTBAAInfo);
2794}
2795
2797 const PointerType *PtrTy,
2798 LValueBaseInfo *BaseInfo,
2799 TBAAAccessInfo *TBAAInfo) {
2800 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2801 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2802 CharUnits(), /*ForPointeeType=*/true,
2803 BaseInfo, TBAAInfo);
2804}
2805
2807 const PointerType *PtrTy) {
2808 LValueBaseInfo BaseInfo;
2809 TBAAAccessInfo TBAAInfo;
2810 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2811 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2812}
2813
2815 const Expr *E, const VarDecl *VD) {
2816 QualType T = E->getType();
2817
2818 // If it's thread_local, emit a call to its wrapper function instead.
2819 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2821 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2822 // Check if the variable is marked as declare target with link clause in
2823 // device codegen.
2824 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2825 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2826 if (Addr.isValid())
2827 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2828 }
2829
2830 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2831
2832 if (VD->getTLSKind() != VarDecl::TLS_None)
2833 V = CGF.Builder.CreateThreadLocalAddress(V);
2834
2835 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2836 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2837 Address Addr(V, RealVarTy, Alignment);
2838 // Emit reference to the private copy of the variable if it is an OpenMP
2839 // threadprivate variable.
2840 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2841 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2842 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2843 E->getExprLoc());
2844 }
2845 LValue LV = VD->getType()->isReferenceType() ?
2846 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2849 setObjCGCLValueClass(CGF.getContext(), E, LV);
2850 return LV;
2851}
2852
2853static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
2854 GlobalDecl GD) {
2855 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2856 if (FD->hasAttr<WeakRefAttr>()) {
2857 ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
2858 return aliasee.getPointer();
2859 }
2860
2861 llvm::Constant *V = CGM.GetAddrOfFunction(GD);
2862 return V;
2863}
2864
2866 GlobalDecl GD) {
2867 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2868 llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
2869 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2870 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2872}
2873
2875 llvm::Value *ThisValue) {
2876
2877 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2878}
2879
2880/// Named Registers are named metadata pointing to the register name
2881/// which will be read from/written to as an argument to the intrinsic
2882/// @llvm.read/write_register.
2883/// So far, only the name is being passed down, but other options such as
2884/// register type, allocation type or even optimization options could be
2885/// passed down via the metadata node.
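/// For example (illustrative only), a global register variable such as
///
/// \code
///   register unsigned long current_sp asm("sp");
/// \endcode
///
/// is lowered to a named metadata node holding the register name, which is
/// then passed to @llvm.read_register / @llvm.write_register.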
2887 SmallString<64> Name("llvm.named.register.");
2888 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2889 assert(Asm->getLabel().size() < 64-Name.size() &&
2890 "Register name too big");
2891 Name.append(Asm->getLabel());
2892 llvm::NamedMDNode *M =
2893 CGM.getModule().getOrInsertNamedMetadata(Name);
2894 if (M->getNumOperands() == 0) {
2895 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2896 Asm->getLabel());
2897 llvm::Metadata *Ops[] = {Str};
2898 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2899 }
2900
2901 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2902
2903 llvm::Value *Ptr =
2904 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2905 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2906}
2907
2908/// Determine whether we can emit a reference to \p VD from the current
2909/// context, despite not necessarily having seen an odr-use of the variable in
2910/// this context.
2912 const DeclRefExpr *E,
2913 const VarDecl *VD) {
2914 // For a variable declared in an enclosing scope, do not emit a spurious
2915 // reference even if we have a capture, as that will emit an unwarranted
2916 // reference to our capture state, and will likely generate worse code than
2917 // emitting a local copy.
2919 return false;
2920
2921 // For a local declaration declared in this function, we can always reference
2922 // it even if we don't have an odr-use.
2923 if (VD->hasLocalStorage()) {
2924 return VD->getDeclContext() ==
2925 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2926 }
2927
2928 // For a global declaration, we can emit a reference to it if we know
2929 // for sure that we are able to emit a definition of it.
2930 VD = VD->getDefinition(CGF.getContext());
2931 if (!VD)
2932 return false;
2933
2934 // Don't emit a spurious reference if it might be to a variable that only
2935 // exists on a different device / target.
2936 // FIXME: This is unnecessarily broad. Check whether this would actually be a
2937 // cross-target reference.
2938 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2939 CGF.getLangOpts().OpenCL) {
2940 return false;
2941 }
2942
2943 // We can emit a spurious reference only if the linkage implies that we'll
2944 // be emitting a non-interposable symbol that will be retained until link
2945 // time.
2946 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
2947 case llvm::GlobalValue::ExternalLinkage:
2948 case llvm::GlobalValue::LinkOnceODRLinkage:
2949 case llvm::GlobalValue::WeakODRLinkage:
2950 case llvm::GlobalValue::InternalLinkage:
2951 case llvm::GlobalValue::PrivateLinkage:
2952 return true;
2953 default:
2954 return false;
2955 }
2956}
2957
2959 const NamedDecl *ND = E->getDecl();
2960 QualType T = E->getType();
2961
2962 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2963 "should not emit an unevaluated operand");
2964
2965 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2966 // Global named registers are accessed via intrinsics only.
2967 if (VD->getStorageClass() == SC_Register &&
2968 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2969 return EmitGlobalNamedRegister(VD, CGM);
2970
2971 // If this DeclRefExpr does not constitute an odr-use of the variable,
2972 // we're not permitted to emit a reference to it in general, and it might
2973 // not be captured if capture would be necessary for a use. Emit the
2974 // constant value directly instead.
2975 if (E->isNonOdrUse() == NOUR_Constant &&
2976 (VD->getType()->isReferenceType() ||
2977 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
2978 VD->getAnyInitializer(VD);
2979 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
2980 E->getLocation(), *VD->evaluateValue(), VD->getType());
2981 assert(Val && "failed to emit constant expression");
2982
2983 Address Addr = Address::invalid();
2984 if (!VD->getType()->isReferenceType()) {
2985 // Spill the constant value to a global.
2986 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
2987 getContext().getDeclAlign(VD));
2988 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
2989 auto *PTy = llvm::PointerType::get(
2990 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
2991 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
2992 } else {
2993 // Should we be using the alignment of the constant pointer we emitted?
2994 CharUnits Alignment =
2996 /* BaseInfo= */ nullptr,
2997 /* TBAAInfo= */ nullptr,
2998 /* forPointeeType= */ true);
2999 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3000 }
3001 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3002 }
3003
3004 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3005
3006 // Check for captured variables.
3008 VD = VD->getCanonicalDecl();
3009 if (auto *FD = LambdaCaptureFields.lookup(VD))
3010 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3011 if (CapturedStmtInfo) {
3012 auto I = LocalDeclMap.find(VD);
3013 if (I != LocalDeclMap.end()) {
3014 LValue CapLVal;
3015 if (VD->getType()->isReferenceType())
3016 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3018 else
3019 CapLVal = MakeAddrLValue(I->second, T);
3020 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3021 // in simd context.
3022 if (getLangOpts().OpenMP &&
3024 CapLVal.setNontemporal(/*Value=*/true);
3025 return CapLVal;
3026 }
3027 LValue CapLVal =
3030 Address LValueAddress = CapLVal.getAddress(*this);
3031 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3032 LValueAddress.getElementType(),
3033 getContext().getDeclAlign(VD)),
3034 CapLVal.getType(),
3036 CapLVal.getTBAAInfo());
3037 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3038 // in simd context.
3039 if (getLangOpts().OpenMP &&
3041 CapLVal.setNontemporal(/*Value=*/true);
3042 return CapLVal;
3043 }
3044
3045 assert(isa<BlockDecl>(CurCodeDecl));
3046 Address addr = GetAddrOfBlockDecl(VD);
3047 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3048 }
3049 }
3050
3051 // FIXME: We should be able to assert this for FunctionDecls as well!
3052 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3053 // those with a valid source location.
3054 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3055 !E->getLocation().isValid()) &&
3056 "Should not use decl without marking it used!");
3057
3058 if (ND->hasAttr<WeakRefAttr>()) {
3059 const auto *VD = cast<ValueDecl>(ND);
3061 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3062 }
3063
3064 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3065 // Check if this is a global variable.
3066 if (VD->hasLinkage() || VD->isStaticDataMember())
3067 return EmitGlobalVarDeclLValue(*this, E, VD);
3068
3069 Address addr = Address::invalid();
3070
3071 // The variable should generally be present in the local decl map.
3072 auto iter = LocalDeclMap.find(VD);
3073 if (iter != LocalDeclMap.end()) {
3074 addr = iter->second;
3075
3076 // Otherwise, it might be a static local we haven't emitted yet for
3077 // some reason; most likely because it's in an outer function.
3078 } else if (VD->isStaticLocal()) {
3079 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3081 addr = Address(
3082 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3083
3084 // No other cases for now.
3085 } else {
3086 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3087 }
3088
3089 // Handle threadlocal function locals.
3090 if (VD->getTLSKind() != VarDecl::TLS_None)
3091 addr = addr.withPointer(
3092 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3094
3095 // Check for OpenMP threadprivate variables.
3096 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3097 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3099 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3100 E->getExprLoc());
3101 }
3102
3103 // Drill into block byref variables.
3104 bool isBlockByref = VD->isEscapingByref();
3105 if (isBlockByref) {
3106 addr = emitBlockByrefAddress(addr, VD);
3107 }
3108
3109 // Drill into reference types.
3110 LValue LV = VD->getType()->isReferenceType() ?
3111 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3113
3114 bool isLocalStorage = VD->hasLocalStorage();
3115
3116 bool NonGCable = isLocalStorage &&
3117 !VD->getType()->isReferenceType() &&
3118 !isBlockByref;
3119 if (NonGCable) {
3121 LV.setNonGC(true);
3122 }
3123
3124 bool isImpreciseLifetime =
3125 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3126 if (isImpreciseLifetime)
3129 return LV;
3130 }
3131
3132 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
3133 LValue LV = EmitFunctionDeclLValue(*this, E, FD);
3134
3135 // Emit debuginfo for the function declaration if the target wants to.
3136 if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
3137 if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
3138 auto *Fn =
3139 cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
3140 if (!Fn->getSubprogram())
3141 DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
3142 }
3143 }
3144
3145 return LV;
3146 }
3147
3148 // FIXME: While we're emitting a binding from an enclosing scope, all other
3149 // DeclRefExprs we see should be implicitly treated as if they also refer to
3150 // an enclosing scope.
3151 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3153 auto *FD = LambdaCaptureFields.lookup(BD);
3154 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3155 }
3156 return EmitLValue(BD->getBinding());
3157 }
3158
3159 // We can form DeclRefExprs naming GUID declarations when reconstituting
3160 // non-type template parameters into expressions.
3161 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3164
3165 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3166 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3167 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3168
3169 if (AS != T.getAddressSpace()) {
3170 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3171 auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
3173 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3174 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3175 }
3176
3177 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3178 }
3179
3180 llvm_unreachable("Unhandled DeclRefExpr");
3181}
3182
3184 // __extension__ doesn't affect lvalue-ness.
3185 if (E->getOpcode() == UO_Extension)
3186 return EmitLValue(E->getSubExpr());
3187
3189 switch (E->getOpcode()) {
3190 default: llvm_unreachable("Unknown unary operator lvalue!");
3191 case UO_Deref: {
3193 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3194
3195 LValueBaseInfo BaseInfo;
3196 TBAAAccessInfo TBAAInfo;
3197 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3198 &TBAAInfo);
3199 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3201
3202 // We should not generate a __weak write barrier on an indirect store
3203 // through a pointer to an object, as in: void foo (__weak id *param); *param = 0;
3204 // But we continue to generate a __strong write barrier on an indirect write
3205 // into a pointer to an object.
3206 if (getLangOpts().ObjC &&
3207 getLangOpts().getGC() != LangOptions::NonGC &&
3208 LV.isObjCWeak())
3210 return LV;
3211 }
3212 case UO_Real:
3213 case UO_Imag: {
3214 LValue LV = EmitLValue(E->getSubExpr());
3215 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3216
3217 // __real is valid on scalars. This is a faster way of testing that.
3218 // __imag can only produce an rvalue on scalars.
3219 if (E->getOpcode() == UO_Real &&
3220 !LV.getAddress(*this).getElementType()->isStructTy()) {
3221 assert(E->getSubExpr()->getType()->isArithmeticType());
3222 return LV;
3223 }
3224
3225 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3226
3227 Address Component =
3228 (E->getOpcode() == UO_Real
3229 ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
3230 : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
3231 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3233 ElemLV.getQuals().addQualifiers(LV.getQuals());
3234 return ElemLV;
3235 }
3236 case UO_PreInc:
3237 case UO_PreDec: {
3238 LValue LV = EmitLValue(E->getSubExpr());
3239 bool isInc = E->getOpcode() == UO_PreInc;
3240
3241 if (E->getType()->isAnyComplexType())
3242 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3243 else
3244 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3245 return LV;
3246 }
3247 }
3248}
3249
3253}
3254
3258}
3259
3260LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3261 auto SL = E->getFunctionName();
3262 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3263 StringRef FnName = CurFn->getName();
3264 if (FnName.starts_with("\01"))
3265 FnName = FnName.substr(1);
3266 StringRef NameItems[] = {
3267      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3268  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3269 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3270 std::string Name = std::string(SL->getString());
3271 if (!Name.empty()) {
3272 unsigned Discriminator =
3274 if (Discriminator)
3275 Name += "_" + Twine(Discriminator + 1).str();
3276      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3277      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3278 } else {
3279 auto C =
3280          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3281      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3282 }
3283 }
3284  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3285  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3286}
3287
3288/// Emit a type description suitable for use by a runtime sanitizer library. The
3289/// format of a type descriptor is
3290///
3291/// \code
3292/// { i16 TypeKind, i16 TypeInfo }
3293/// \endcode
3294///
3295/// followed by an array of i8 containing the type name. TypeKind is 0 for an
3296/// integer, 1 for a floating point value, and -1 for anything else.
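///
/// For example, for a signed 32-bit 'int' the code below computes TypeKind = 0
/// and TypeInfo = (log2(32) << 1) | 1 = 11, followed by the quoted name
/// "'int'" (illustrative only; the exact values depend on the target).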
3297llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3298  // Only emit each type's descriptor once.
3299 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3300 return C;
3301
3302 uint16_t TypeKind = -1;
3303 uint16_t TypeInfo = 0;
3304
3305 if (T->isIntegerType()) {
3306 TypeKind = 0;
3307 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3308 (T->isSignedIntegerType() ? 1 : 0);
3309 } else if (T->isFloatingType()) {
3310    TypeKind = 1;
3311    TypeInfo = getContext().getTypeSize(T);
3312 }
3313
3314 // Format the type name as if for a diagnostic, including quotes and
3315 // optionally an 'aka'.
3316 SmallString<32> Buffer;
3318 DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
3319 StringRef(), std::nullopt, Buffer, std::nullopt);
3320
3321 llvm::Constant *Components[] = {
3322 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3323 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3324 };
3325 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3326
3327 auto *GV = new llvm::GlobalVariable(
3328 CGM.getModule(), Descriptor->getType(),
3329 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3330 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3332
3333 // Remember the descriptor for this type.
3335
3336 return GV;
3337}
3338
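/// Lower a check operand to the intptr_t-sized value passed to the handlers:
/// floating-point values that fit are bitcast to integers, small integers are
/// zero-extended, pointers go through ptrtoint, and anything else is spilled
/// to a temporary and passed by address. (Illustrative summary of the cases
/// handled below.)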
3339llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3340 llvm::Type *TargetTy = IntPtrTy;
3341
3342 if (V->getType() == TargetTy)
3343 return V;
3344
3345 // Floating-point types which fit into intptr_t are bitcast to integers
3346 // and then passed directly (after zero-extension, if necessary).
3347 if (V->getType()->isFloatingPointTy()) {
3348 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3349 if (Bits <= TargetTy->getIntegerBitWidth())
3350 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3351 Bits));
3352 }
3353
3354 // Integers which fit in intptr_t are zero-extended and passed directly.
3355 if (V->getType()->isIntegerTy() &&
3356 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3357 return Builder.CreateZExt(V, TargetTy);
3358
3359 // Pointers are passed directly, everything else is passed by address.
3360 if (!V->getType()->isPointerTy()) {
3361 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3362 Builder.CreateStore(V, Ptr);
3363 V = Ptr.getPointer();
3364 }
3365 return Builder.CreatePtrToInt(V, TargetTy);
3366}
3367
3368/// Emit a representation of a SourceLocation for passing to a handler
3369/// in a sanitizer runtime library. The format for this data is:
3370/// \code
3371/// struct SourceLocation {
3372/// const char *Filename;
3373/// int32_t Line, Column;
3374/// };
3375/// \endcode
3376/// For an invalid SourceLocation, the Filename pointer is null.
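///
/// For example, a check at foo.c:12:8 is described roughly as (illustrative):
/// \code
/// { ptr @.src, i32 12, i32 8 }
/// \endcode
/// where @.src holds the (possibly path-stripped) file name.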
3377llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3378  llvm::Constant *Filename;
3379 int Line, Column;
3380
3381  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3382 if (PLoc.isValid()) {
3383 StringRef FilenameString = PLoc.getFilename();
3384
3385 int PathComponentsToStrip =
3386 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3387 if (PathComponentsToStrip < 0) {
3388 assert(PathComponentsToStrip != INT_MIN);
3389 int PathComponentsToKeep = -PathComponentsToStrip;
3390 auto I = llvm::sys::path::rbegin(FilenameString);
3391 auto E = llvm::sys::path::rend(FilenameString);
3392 while (I != E && --PathComponentsToKeep)
3393 ++I;
3394
3395 FilenameString = FilenameString.substr(I - E);
3396 } else if (PathComponentsToStrip > 0) {
3397 auto I = llvm::sys::path::begin(FilenameString);
3398 auto E = llvm::sys::path::end(FilenameString);
3399 while (I != E && PathComponentsToStrip--)
3400 ++I;
3401
3402 if (I != E)
3403 FilenameString =
3404 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3405 else
3406 FilenameString = llvm::sys::path::filename(FilenameString);
3407 }
3408
3409 auto FilenameGV =
3410 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3412 cast<llvm::GlobalVariable>(
3413 FilenameGV.getPointer()->stripPointerCasts()));
3414 Filename = FilenameGV.getPointer();
3415 Line = PLoc.getLine();
3416 Column = PLoc.getColumn();
3417 } else {
3418 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3419 Line = Column = 0;
3420 }
3421
3422 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3423 Builder.getInt32(Column)};
3424
3425 return llvm::ConstantStruct::getAnon(Data);
3426}
3427
3428namespace {
3429/// Specify under what conditions this check can be recovered
3430enum class CheckRecoverableKind {
3431  /// Always terminate program execution if this check fails.
3432  Unrecoverable,
3433 /// Check supports recovering, runtime has both fatal (noreturn) and
3434 /// non-fatal handlers for this check.
3435 Recoverable,
3436  /// Runtime conditionally aborts, always need to support recovery.
3437  AlwaysRecoverable
3438};
3439}
3440
3441static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3442 assert(Kind.countPopulation() == 1);
3443 if (Kind == SanitizerKind::Vptr)
3444 return CheckRecoverableKind::AlwaysRecoverable;
3445 else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3446 return CheckRecoverableKind::Unrecoverable;
3447 else
3448 return CheckRecoverableKind::Recoverable;
3449}
3450
3451namespace {
3452struct SanitizerHandlerInfo {
3453 char const *const Name;
3454 unsigned Version;
3455};
3456}
3457
3458const SanitizerHandlerInfo SanitizerHandlers[] = {
3459#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3461#undef SANITIZER_CHECK
3462};
3463
3464static void emitCheckHandlerCall(CodeGenFunction &CGF,
3465                                 llvm::FunctionType *FnType,
3466                                 ArrayRef<llvm::Value *> FnArgs,
3467 SanitizerHandler CheckHandler,
3468 CheckRecoverableKind RecoverKind, bool IsFatal,
3469 llvm::BasicBlock *ContBB) {
3470 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3471 std::optional<ApplyDebugLocation> DL;
3472 if (!CGF.Builder.getCurrentDebugLocation()) {
3473 // Ensure that the call has at least an artificial debug location.
3474 DL.emplace(CGF, SourceLocation());
3475 }
3476 bool NeedsAbortSuffix =
3477 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3478 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3479 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3480 const StringRef CheckName = CheckInfo.Name;
3481 std::string FnName = "__ubsan_handle_" + CheckName.str();
3482 if (CheckInfo.Version && !MinimalRuntime)
3483 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3484 if (MinimalRuntime)
3485 FnName += "_minimal";
3486 if (NeedsAbortSuffix)
3487 FnName += "_abort";
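  // e.g. "__ubsan_handle_add_overflow", "__ubsan_handle_add_overflow_abort",
  // or the corresponding "_minimal" variants with -fsanitize-minimal-runtime
  // (illustrative; the exact name depends on the check and its version).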
3488 bool MayReturn =
3489 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3490
3491 llvm::AttrBuilder B(CGF.getLLVMContext());
3492 if (!MayReturn) {
3493 B.addAttribute(llvm::Attribute::NoReturn)
3494 .addAttribute(llvm::Attribute::NoUnwind);
3495 }
3496 B.addUWTableAttr(llvm::UWTableKind::Default);
3497
3498 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3499 FnType, FnName,
3500 llvm::AttributeList::get(CGF.getLLVMContext(),
3501 llvm::AttributeList::FunctionIndex, B),
3502 /*Local=*/true);
3503 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3504 if (!MayReturn) {
3505 HandlerCall->setDoesNotReturn();
3506 CGF.Builder.CreateUnreachable();
3507 } else {
3508 CGF.Builder.CreateBr(ContBB);
3509 }
3510}
3511
3512void CodeGenFunction::EmitCheck(
3513 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3514 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3515 ArrayRef<llvm::Value *> DynamicArgs) {
3516 assert(IsSanitizerScope);
3517 assert(Checked.size() > 0);
3518 assert(CheckHandler >= 0 &&
3519 size_t(CheckHandler) < std::size(SanitizerHandlers));
3520 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3521
3522 llvm::Value *FatalCond = nullptr;
3523 llvm::Value *RecoverableCond = nullptr;
3524 llvm::Value *TrapCond = nullptr;
3525 for (int i = 0, n = Checked.size(); i < n; ++i) {
3526 llvm::Value *Check = Checked[i].first;
3527 // -fsanitize-trap= overrides -fsanitize-recover=.
3528 llvm::Value *&Cond =
3529 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3530 ? TrapCond
3531 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3532 ? RecoverableCond
3533 : FatalCond;
3534 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3535 }
3536
3537  if (ClSanitizeGuardChecks) {
3538 llvm::Value *Allow =
3539 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3540 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3541
3542 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3543 if (*Cond)
3544 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3545 }
3546 }
3547
3548 if (TrapCond)
3549 EmitTrapCheck(TrapCond, CheckHandler);
3550 if (!FatalCond && !RecoverableCond)
3551 return;
3552
3553 llvm::Value *JointCond;
3554 if (FatalCond && RecoverableCond)
3555 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3556 else
3557 JointCond = FatalCond ? FatalCond : RecoverableCond;
3558 assert(JointCond);
3559
3560 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3561 assert(SanOpts.has(Checked[0].second));
3562#ifndef NDEBUG
3563 for (int i = 1, n = Checked.size(); i < n; ++i) {
3564 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3565 "All recoverable kinds in a single check must be same!");
3566 assert(SanOpts.has(Checked[i].second));
3567 }
3568#endif
3569
3570 llvm::BasicBlock *Cont = createBasicBlock("cont");
3571 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3572 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3573 // Give hint that we very much don't expect to execute the handler
3574 // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
3575 llvm::MDBuilder MDHelper(getLLVMContext());
3576 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3577 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3578 EmitBlock(Handlers);
3579
3580 // Handler functions take an i8* pointing to the (handler-specific) static
3581 // information block, followed by a sequence of intptr_t arguments
3582 // representing operand values.
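  // For a check with one static-data block and two operands the call built
  // here is therefore roughly (illustrative; "foo" stands for the check name):
  //   __ubsan_handle_foo(ptr @static_data, i64 %op0, i64 %op1)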
3585 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3586 Args.reserve(DynamicArgs.size() + 1);
3587 ArgTypes.reserve(DynamicArgs.size() + 1);
3588
3589 // Emit handler arguments and create handler function type.
3590 if (!StaticArgs.empty()) {
3591 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3592 auto *InfoPtr = new llvm::GlobalVariable(
3593 CGM.getModule(), Info->getType(), false,
3594 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3595 llvm::GlobalVariable::NotThreadLocal,
3596 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3597 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3599 Args.push_back(InfoPtr);
3600 ArgTypes.push_back(Args.back()->getType());
3601 }
3602
3603 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3604 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3605 ArgTypes.push_back(IntPtrTy);
3606 }
3607 }
3608
3609 llvm::FunctionType *FnType =
3610 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3611
3612 if (!FatalCond || !RecoverableCond) {
3613    // Simple case: we need to generate a single handler call, either
3614    // fatal or non-fatal.
3615 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3616 (FatalCond != nullptr), Cont);
3617 } else {
3618    // Emit two handler calls: one for the set of unrecoverable checks and
3619    // another for the recoverable ones.
3620 llvm::BasicBlock *NonFatalHandlerBB =
3621 createBasicBlock("non_fatal." + CheckName);
3622 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3623 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3624 EmitBlock(FatalHandlerBB);
3625 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3626 NonFatalHandlerBB);
3627 EmitBlock(NonFatalHandlerBB);
3628 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3629 Cont);
3630 }
3631
3632 EmitBlock(Cont);
3633}
3634
3635void CodeGenFunction::EmitCfiSlowPathCheck(
3636 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3637 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3638 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3639
3640 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3641 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3642
3643 llvm::MDBuilder MDHelper(getLLVMContext());
3644 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3645 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3646
3647 EmitBlock(CheckBB);
3648
3649 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3650
3651 llvm::CallInst *CheckCall;
3652 llvm::FunctionCallee SlowPathFn;
3653 if (WithDiag) {
3654 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3655 auto *InfoPtr =
3656 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3657 llvm::GlobalVariable::PrivateLinkage, Info);
3658 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3660
3661 SlowPathFn = CGM.getModule().getOrInsertFunction(
3662 "__cfi_slowpath_diag",
3663 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3664 false));
3665 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3666 } else {
3667 SlowPathFn = CGM.getModule().getOrInsertFunction(
3668 "__cfi_slowpath",
3669 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3670 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3671 }
3672
3674 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3675 CheckCall->setDoesNotThrow();
3676
3677 EmitBlock(Cont);
3678}
3679
3680// Emit a stub for __cfi_check function so that the linker knows about this
3681// symbol in LTO mode.
3682void CodeGenFunction::EmitCfiCheckStub() {
3683  llvm::Module *M = &CGM.getModule();
3684 ASTContext &C = getContext();
3685 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3686
3688 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3689 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3690 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3692 FnArgs.push_back(&ArgCallsiteTypeId);
3693 FnArgs.push_back(&ArgAddr);
3694 FnArgs.push_back(&ArgCFICheckFailData);
3695 const CGFunctionInfo &FI =
3697
3698 llvm::Function *F = llvm::Function::Create(
3699 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3700 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3701 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3703 F->setAlignment(llvm::Align(4096));
3704 CGM.setDSOLocal(F);
3705
3706 llvm::LLVMContext &Ctx = M->getContext();
3707 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3708 // CrossDSOCFI pass is not executed if there is no executable code.
3709 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3710 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3711 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3712}
3713
3714// This function is basically a switch over the CFI failure kind, which is
3715// extracted from CFICheckFailData (1st function argument). Each case is either
3716// llvm.trap or a call to one of the two runtime handlers, based on
3717// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3718// failure kind) traps, but this should really never happen. CFICheckFailData
3719// can be nullptr if the calling module has -fsanitize-trap behavior for this
3720// check kind; in this case __cfi_check_fail traps as well.
3721void CodeGenFunction::EmitCfiCheckFail() {
3722  SanitizerScope SanScope(this);
3723 FunctionArgList Args;
3728 Args.push_back(&ArgData);
3729 Args.push_back(&ArgAddr);
3730
3731 const CGFunctionInfo &FI =
3733
3734 llvm::Function *F = llvm::Function::Create(
3735 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3736 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3737
3738 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3740 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3741
3742 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3743 SourceLocation());
3744
3745  // This function is not affected by NoSanitizeList. It does not have a
3746  // source location, but "src:*" would still apply. Revert any
3747 // changes to SanOpts made in StartFunction.
3749
3750 llvm::Value *Data =
3751 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3752 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3753 llvm::Value *Addr =
3754 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3755 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3756
3757  // Data == nullptr means the calling module has trap behavior for this check.
3758 llvm::Value *DataIsNotNullPtr =
3759 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3760 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3761
3762 llvm::StructType *SourceLocationTy =
3763 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3764 llvm::StructType *CfiCheckFailDataTy =
3765 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3766
3767 llvm::Value *V = Builder.CreateConstGEP2_32(
3768 CfiCheckFailDataTy,
3769 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3770 0);
3771
3772 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3773 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3774
3775 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3777 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3778 llvm::Value *ValidVtable = Builder.CreateZExt(
3779 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3780 {Addr, AllVtables}),
3781 IntPtrTy);
3782
3783 const std::pair<int, SanitizerMask> CheckKinds[] = {
3784 {CFITCK_VCall, SanitizerKind::CFIVCall},
3785 {CFITCK_NVCall, SanitizerKind::CFINVCall},
3786 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3787 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3788 {CFITCK_ICall, SanitizerKind::CFIICall}};
3789
3791 for (auto CheckKindMaskPair : CheckKinds) {
3792 int Kind = CheckKindMaskPair.first;
3793 SanitizerMask Mask = CheckKindMaskPair.second;
3794 llvm::Value *Cond =
3795 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3796 if (CGM.getLangOpts().Sanitize.has(Mask))
3797 EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3798 {Data, Addr, ValidVtable});
3799 else
3800 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3801 }
3802
3804 // The only reference to this function will be created during LTO link.
3805 // Make sure it survives until then.
3806 CGM.addUsedGlobal(F);
3807}
3808
3809void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3810 if (SanOpts.has(SanitizerKind::Unreachable)) {
3811 SanitizerScope SanScope(this);
3812 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3813 SanitizerKind::Unreachable),
3814 SanitizerHandler::BuiltinUnreachable,
3815 EmitCheckSourceLocation(Loc), std::nullopt);
3816 }
3817 Builder.CreateUnreachable();
3818}
3819
3820void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3821 SanitizerHandler CheckHandlerID) {
3822 llvm::BasicBlock *Cont = createBasicBlock("cont");
3823
3824 // If we're optimizing, collapse all calls to trap down to just one per
3825 // check-type per function to save on code size.
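  // (When ClSanitizeDebugDeoptimization is set, each check instead gets its
  // own trap block, keyed off the current number of blocks in the function;
  // see its use below.)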
3826 if ((int)TrapBBs.size() <= CheckHandlerID)
3827 TrapBBs.resize(CheckHandlerID + 1);
3828
3829 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3830
3831  if (!ClSanitizeDebugDeoptimization &&
3832 CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
3833 (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
3834 auto Call = TrapBB->begin();
3835 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3836
3837 Call->applyMergedLocation(Call->getDebugLoc(),
3838 Builder.getCurrentDebugLocation());
3839 Builder.CreateCondBr(Checked, Cont, TrapBB);
3840 } else {
3841 TrapBB = createBasicBlock("trap");
3842 Builder.CreateCondBr(Checked, Cont, TrapBB);
3843 EmitBlock(TrapBB);
3844
3845 llvm::CallInst *TrapCall = Builder.CreateCall(
3846 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3847 llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization
3848 ? TrapBB->getParent()->size()
3849 : CheckHandlerID));
3850
3851 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3852 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3854 TrapCall->addFnAttr(A);
3855 }
3856 TrapCall->setDoesNotReturn();
3857 TrapCall->setDoesNotThrow();
3858 Builder.CreateUnreachable();
3859 }
3860
3861 EmitBlock(Cont);
3862}
3863
3864llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3865 llvm::CallInst *TrapCall =
3866 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3867
3868 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3869 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3871 TrapCall->addFnAttr(A);
3872 }
3873
3874 return TrapCall;
3875}
3876
3877Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3878 LValueBaseInfo *BaseInfo,
3879 TBAAAccessInfo *TBAAInfo) {
3880 assert(E->getType()->isArrayType() &&
3881 "Array to pointer decay must have array source type!");
3882
3883 // Expressions of array type can't be bitfields or vector elements.
3884 LValue LV = EmitLValue(E);
3885 Address Addr = LV.getAddress(*this);
3886
3887 // If the array type was an incomplete type, we need to make sure
3888 // the decay ends up being the right type.
3889 llvm::Type *NewTy = ConvertType(E->getType());
3890 Addr = Addr.withElementType(NewTy);
3891
3892 // Note that VLA pointers are always decayed, so we don't need to do
3893 // anything here.
3894 if (!E->getType()->isVariableArrayType()) {
3895 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3896 "Expected pointer to array");
3897 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3898 }
3899
3900 // The result of this decay conversion points to an array element within the
3901 // base lvalue. However, since TBAA currently does not support representing
3902 // accesses to elements of member arrays, we conservatively represent accesses
3903  // to the pointee object as if it had no base lvalue specified.
3904  // TODO: Support TBAA for member arrays.
3905  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3906  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3907 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3908
3909 return Addr.withElementType(ConvertTypeForMem(EltType));
3910}
3911
3912/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3913/// array to pointer, return the array subexpression.
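/// For example, for 'int A[10];' the subscript 'A[i]' has a base that is an
/// ArrayToPointerDecay cast of the DeclRefExpr 'A'; this returns that
/// subexpression so the caller can emit a single "gep A, 0, i".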
3914static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3915 // If this isn't just an array->pointer decay, bail out.
3916 const auto *CE = dyn_cast<CastExpr>(E);
3917 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3918 return nullptr;
3919
3920 // If this is a decay from variable width array, bail out.
3921 const Expr *SubExpr = CE->getSubExpr();
3922 if (SubExpr->getType()->isVariableArrayType())
3923 return nullptr;
3924
3925 return SubExpr;
3926}
3927
3929 llvm::Type *elemType,
3930 llvm::Value *ptr,
3931 ArrayRef<llvm::Value*> indices,
3932 bool inbounds,
3933 bool signedIndices,
3934 SourceLocation loc,
3935 const llvm::Twine &name = "arrayidx") {
3936 if (inbounds) {
3937 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
3939 name);
3940 } else {
3941 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3942 }
3943}
3944
3947 llvm::Type *elementType, bool inbounds,
3948 bool signedIndices, SourceLocation loc,
3949 CharUnits align,
3950 const llvm::Twine &name = "arrayidx") {
3951 if (inbounds) {
3952 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
3954 align, name);
3955 } else {
3956 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
3957 }
3958}
3959
3960static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3961 llvm::Value *idx,
3962 CharUnits eltSize) {
3963 // If we have a constant index, we can use the exact offset of the
3964 // element we're accessing.
3965 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3966 CharUnits offset = constantIdx->getZExtValue() * eltSize;
3967 return arrayAlign.alignmentAtOffset(offset);
3968
3969 // Otherwise, use the worst-case alignment for any element.
3970 } else {
3971 return arrayAlign.alignmentOfArrayElement(eltSize);
3972 }
3973}
3974
3975static QualType getFixedSizeElementType(const ASTContext &ctx,
3976 const VariableArrayType *vla) {
3977 QualType eltType;
3978 do {
3979 eltType = vla->getElementType();
3980 } while ((vla = ctx.getAsVariableArrayType(eltType)));
3981 return eltType;
3982}
3983
3984static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
3985 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
3986}
3987
3988static bool hasBPFPreserveStaticOffset(const Expr *E) {
3989 if (!E)
3990 return false;
3991 QualType PointeeType = E->getType()->getPointeeType();
3992 if (PointeeType.isNull())
3993 return false;
3994 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
3995 return hasBPFPreserveStaticOffset(BaseDecl);
3996 return false;
3997}
3998
3999// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4000static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4001 Address &Addr) {
4002 if (!CGF.getTarget().getTriple().isBPF())
4003 return Addr;
4004
4005 llvm::Function *Fn =
4006 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4007 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4008 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4009}
4010
4011/// Given an array base, check whether its member access belongs to a record
4012/// with preserve_access_index attribute or not.
4013static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4014 if (!ArrayBase || !CGF.getDebugInfo())
4015 return false;
4016
4017 // Only support base as either a MemberExpr or DeclRefExpr.
4018 // DeclRefExpr to cover cases like:
4019 // struct s { int a; int b[10]; };
4020 // struct s *p;
4021 // p[1].a
4022 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4023 // p->b[5] is a MemberExpr example.
4024 const Expr *E = ArrayBase->IgnoreImpCasts();
4025 if (const auto *ME = dyn_cast<MemberExpr>(E))
4026 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4027
4028 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4029 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4030 if (!VarDef)
4031 return false;
4032
4033 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4034 if (!PtrT)
4035 return false;
4036
4037 const auto *PointeeT = PtrT->getPointeeType()
4039 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4040 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4041 return false;
4042 }
4043
4044 return false;
4045}
4046
4049 QualType eltType, bool inbounds,
4050 bool signedIndices, SourceLocation loc,
4051 QualType *arrayType = nullptr,
4052 const Expr *Base = nullptr,
4053 const llvm::Twine &name = "arrayidx") {
4054 // All the indices except that last must be zero.
4055#ifndef NDEBUG
4056 for (auto *idx : indices.drop_back())
4057 assert(isa<llvm::ConstantInt>(idx) &&
4058 cast<llvm::ConstantInt>(idx)->isZero());
4059#endif
4060
4061 // Determine the element size of the statically-sized base. This is
4062 // the thing that the indices are expressed in terms of.
4063 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4064 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4065 }
4066
4067 // We can use that to compute the best alignment of the element.
4068 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4069 CharUnits eltAlign =
4070 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4071
4073 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4074
4075 llvm::Value *eltPtr;
4076 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4077 if (!LastIndex ||
4079 addr = emitArraySubscriptGEP(CGF, addr, indices,
4080 CGF.ConvertTypeForMem(eltType), inbounds,
4081 signedIndices, loc, eltAlign, name);
4082 return addr;
4083 } else {
4084 // Remember the original array subscript for bpf target
4085 unsigned idx = LastIndex->getZExtValue();
4086 llvm::DIType *DbgInfo = nullptr;
4087 if (arrayType)
4088 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4089 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4090 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4091 idx, DbgInfo);
4092 }
4093
4094 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4095}
4096
4097/// The offset of a field from the beginning of the record.
4098static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4099 const FieldDecl *FD, int64_t &Offset) {
4100 ASTContext &Ctx = CGF.getContext();
4101 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4102 unsigned FieldNo = 0;
4103
4104 for (const Decl *D : RD->decls()) {
4105 if (const auto *Record = dyn_cast<RecordDecl>(D))
4106 if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
4107 Offset += Layout.getFieldOffset(FieldNo);
4108 return true;
4109 }
4110
4111 if (const auto *Field = dyn_cast<FieldDecl>(D))
4112 if (FD == Field) {
4113 Offset += Layout.getFieldOffset(FieldNo);
4114 return true;
4115 }
4116
4117 if (isa<FieldDecl>(D))
4118 ++FieldNo;
4119 }
4120
4121 return false;
4122}
4123
4124/// Returns the relative offset difference between \p FD1 and \p FD2.
4125/// \code
4126/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4127/// \endcode
4128/// Both fields must be within the same struct.
4129static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4130 const FieldDecl *FD1,
4131 const FieldDecl *FD2) {
4132 const RecordDecl *FD1OuterRec =
4134 const RecordDecl *FD2OuterRec =
4136
4137 if (FD1OuterRec != FD2OuterRec)
4138 // Fields must be within the same RecordDecl.
4139 return std::optional<int64_t>();
4140
4141 int64_t FD1Offset = 0;
4142 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4143 return std::optional<int64_t>();
4144
4145 int64_t FD2Offset = 0;
4146 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4147 return std::optional<int64_t>();
4148
4149 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4150}
4151
4152LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4153 bool Accessed) {
4154 // The index must always be an integer, which is not an aggregate. Emit it
4155 // in lexical order (this complexity is, sadly, required by C++17).
4156 llvm::Value *IdxPre =
4157 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4158 bool SignedIndices = false;
4159 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4160 auto *Idx = IdxPre;
4161 if (E->getLHS() != E->getIdx()) {
4162 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4163 Idx = EmitScalarExpr(E->getIdx());
4164 }
4165
4166 QualType IdxTy = E->getIdx()->getType();
4167 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4168 SignedIndices |= IdxSigned;
4169
4170 if (SanOpts.has(SanitizerKind::ArrayBounds))
4171 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4172
4173 // Extend or truncate the index type to 32 or 64-bits.
4174 if (Promote && Idx->getType() != IntPtrTy)
4175 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4176
4177 return Idx;
4178 };
4179 IdxPre = nullptr;
4180
4181 // If the base is a vector type, then we are forming a vector element lvalue
4182 // with this subscript.
4183 if (E->getBase()->getType()->isVectorType() &&
4184 !isa<ExtVectorElementExpr>(E->getBase())) {
4185 // Emit the vector as an lvalue to get its address.
4186 LValue LHS = EmitLValue(E->getBase());
4187 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4188 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4189 return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
4190 E->getBase()->getType(), LHS.getBaseInfo(),
4191 TBAAAccessInfo());
4192 }
4193
4194 // All the other cases basically behave like simple offsetting.
4195
4196 // Handle the extvector case we ignored above.
4197 if (isa<ExtVectorElementExpr>(E->getBase())) {
4198 LValue LV = EmitLValue(E->getBase());
4199 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4201
4202 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4203 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4204 SignedIndices, E->getExprLoc());
4205 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4206 CGM.getTBAAInfoForSubobject(LV, EltType));
4207 }
4208
4209 LValueBaseInfo EltBaseInfo;
4210 TBAAAccessInfo EltTBAAInfo;
4211 Address Addr = Address::invalid();
4212 if (const VariableArrayType *vla =
4213 getContext().getAsVariableArrayType(E->getType())) {
4214 // The base must be a pointer, which is not an aggregate. Emit
4215 // it. It needs to be emitted first in case it's what captures
4216 // the VLA bounds.
4217 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4218 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4219
4220 // The element count here is the total number of non-VLA elements.
4221 llvm::Value *numElements = getVLASize(vla).NumElts;
4222
4223 // Effectively, the multiply by the VLA size is part of the GEP.
4224 // GEP indexes are signed, and scaling an index isn't permitted to
4225 // signed-overflow, so we use the same semantics for our explicit
4226 // multiply. We suppress this if overflow is not undefined behavior.
4227 if (getLangOpts().isSignedOverflowDefined()) {
4228 Idx = Builder.CreateMul(Idx, numElements);
4229 } else {
4230 Idx = Builder.CreateNSWMul(Idx, numElements);
4231 }
4232
4233 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4234 !getLangOpts().isSignedOverflowDefined(),
4235 SignedIndices, E->getExprLoc());
4236
4237 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4238 // Indexing over an interface, as in "NSString *P; P[4];"
4239
4240 // Emit the base pointer.
4241 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4242 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4243
4244 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4245 llvm::Value *InterfaceSizeVal =
4246 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4247
4248 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4249
4250 // We don't necessarily build correct LLVM struct types for ObjC
4251    // interfaces, so we can't rely on GEP to do this scaling
4252    // correctly; instead we cast to i8*. FIXME: is this actually
4253 // true? A lot of other things in the fragile ABI would break...
4254 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4255
4256 // Do the GEP.
4257 CharUnits EltAlign =
4258 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4259 llvm::Value *EltPtr =
4260 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4261 ScaledIdx, false, SignedIndices, E->getExprLoc());
4262 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4263 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4264 // If this is A[i] where A is an array, the frontend will have decayed the
4265 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4266 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4267 // "gep x, i" here. Emit one "gep A, 0, i".
4268 assert(Array->getType()->isArrayType() &&
4269 "Array to pointer decay must have array source type!");
4270 LValue ArrayLV;
4271 // For simple multidimensional array indexing, set the 'accessed' flag for
4272 // better bounds-checking of the base expression.
4273 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4274 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4275 else
4276 ArrayLV = EmitLValue(Array);
4277 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4278
4279 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4280 // If the array being accessed has a "counted_by" attribute, generate
4281 // bounds checking code. The "count" field is at the top level of the
4282 // struct or in an anonymous struct, that's also at the top level. Future
4283 // expansions may allow the "count" to reside at any place in the struct,
4284 // but the value of "counted_by" will be a "simple" path to the count,
4285 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4286 // similar to emit the correct GEP.
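      // For reference, such an attribute looks like (illustrative):
      //   struct s { int count; int fam[] __attribute__((counted_by(count))); };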
4287 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4288 getLangOpts().getStrictFlexArraysLevel();
4289
4290 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4291 ME &&
4292 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4294 const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
4295 if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
4296 if (std::optional<int64_t> Diff =
4297 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4298 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4299
4300 // Create a GEP with a byte offset between the FAM and count and
4301 // use that to load the count value.
4303 ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);
4304
4305 llvm::Type *CountTy = ConvertType(CountFD->getType());
4306 llvm::Value *Res = Builder.CreateInBoundsGEP(
4307 Int8Ty, Addr.emitRawPointer(*this),
4308 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4309 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4310 ".counted_by.load");
4311
4312 // Now emit the bounds checking.
4313 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4314 Array->getType(), Accessed);
4315 }
4316 }
4317 }
4318 }
4319
4320 // Propagate the alignment from the array itself to the result.
4321 QualType arrayType = Array->getType();
4322 Addr = emitArraySubscriptGEP(
4323 *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4324 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4325 E->getExprLoc(), &arrayType, E->getBase());
4326 EltBaseInfo = ArrayLV.getBaseInfo();
4327 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4328 } else {
4329 // The base must be a pointer; emit it with an estimate of its alignment.
4330 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4331 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4332 QualType ptrType = E->getBase()->getType();
4333 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4334 !getLangOpts().isSignedOverflowDefined(),
4335 SignedIndices, E->getExprLoc(), &ptrType,
4336 E->getBase());
4337 }
4338
4339 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4340
4341 if (getLangOpts().ObjC &&
4342 getLangOpts().getGC() != LangOptions::NonGC) {
4345 }
4346 return LV;
4347}
4348
4349LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4350 assert(
4351 !E->isIncomplete() &&
4352 "incomplete matrix subscript expressions should be rejected during Sema");
4353 LValue Base = EmitLValue(E->getBase());
4354 llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
4355 llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
4356 llvm::Value *NumRows = Builder.getIntN(
4357 RowIdx->getType()->getScalarSizeInBits(),
4359 llvm::Value *FinalIdx =
4360 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4361 return LValue::MakeMatrixElt(
4362 MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
4363 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4364}
4365
4366static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4367 LValueBaseInfo &BaseInfo,
4368 TBAAAccessInfo &TBAAInfo,
4369 QualType BaseTy, QualType ElTy,
4370 bool IsLowerBound) {
4371 LValue BaseLVal;
4372 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4373 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4374 if (BaseTy->isArrayType()) {
4375 Address Addr = BaseLVal.getAddress(CGF);
4376 BaseInfo = BaseLVal.getBaseInfo();
4377
4378 // If the array type was an incomplete type, we need to make sure
4379 // the decay ends up being the right type.
4380 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4381 Addr = Addr.withElementType(NewTy);
4382
4383 // Note that VLA pointers are always decayed, so we don't need to do
4384 // anything here.
4385 if (!BaseTy->isVariableArrayType()) {
4386 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4387 "Expected pointer to array");
4388 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4389 }
4390
4391 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4392 }
4393 LValueBaseInfo TypeBaseInfo;
4394 TBAAAccessInfo TypeTBAAInfo;
4395 CharUnits Align =
4396 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4397 BaseInfo.mergeForCast(TypeBaseInfo);
4398 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4399 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
4400 CGF.ConvertTypeForMem(ElTy), Align);
4401 }
4402 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4403}
4404
4405LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4406 bool IsLowerBound) {
4407
4408 assert(!E->isOpenACCArraySection() &&
4409 "OpenACC Array section codegen not implemented");
4410
4412 QualType ResultExprTy;
4413 if (auto *AT = getContext().getAsArrayType(BaseTy))
4414 ResultExprTy = AT->getElementType();
4415 else
4416 ResultExprTy = BaseTy->getPointeeType();
4417 llvm::Value *Idx = nullptr;
4418 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4419    // Requesting the lower bound, or the upper bound when no length and no
4420    // ':' (which would imply a default length) were given -> length = 1.
4421 // Idx = LowerBound ?: 0;
4422 if (auto *LowerBound = E->getLowerBound()) {
4423 Idx = Builder.CreateIntCast(
4424 EmitScalarExpr(LowerBound), IntPtrTy,
4425 LowerBound->getType()->hasSignedIntegerRepresentation());
4426 } else
4427 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4428 } else {
4429 // Try to emit length or lower bound as constant. If this is possible, 1
4430 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4431 // IR (LB + Len) - 1.
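    // For example (illustrative), for the section a[1:3] (lower bound 1,
    // length 3) the upper-bound index is 1 + 3 - 1 = 3; when both values are
    // constants the arithmetic is folded here instead of emitting IR.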
4432 auto &C = CGM.getContext();
4433 auto *Length = E->getLength();
4434 llvm::APSInt ConstLength;
4435 if (Length) {
4436 // Idx = LowerBound + Length - 1;
4437 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4438 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4439 Length = nullptr;
4440 }
4441 auto *LowerBound = E->getLowerBound();
4442 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4443 if (LowerBound) {
4444 if (std::optional<llvm::APSInt> LB =
4445 LowerBound->getIntegerConstantExpr(C)) {
4446 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4447 LowerBound = nullptr;
4448 }
4449 }
4450 if (!Length)
4451 --ConstLength;
4452 else if (!LowerBound)
4453 --ConstLowerBound;
4454
4455 if (Length || LowerBound) {
4456 auto *LowerBoundVal =
4457 LowerBound
4458 ? Builder.CreateIntCast(
4459 EmitScalarExpr(LowerBound), IntPtrTy,
4460 LowerBound->getType()->hasSignedIntegerRepresentation())
4461 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4462 auto *LengthVal =
4463 Length
4464 ? Builder.CreateIntCast(
4465 EmitScalarExpr(Length), IntPtrTy,
4466 Length->getType()->hasSignedIntegerRepresentation())
4467 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4468 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4469 /*HasNUW=*/false,
4470 !getLangOpts().isSignedOverflowDefined());
4471 if (Length && LowerBound) {
4472 Idx = Builder.CreateSub(
4473 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4474 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4475 }
4476 } else
4477 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4478 } else {
4479 // Idx = ArraySize - 1;
4480 QualType ArrayTy = BaseTy->isPointerType()
4482 : BaseTy;
4483 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4484 Length = VAT->getSizeExpr();
4485 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4486 ConstLength = *L;
4487 Length = nullptr;
4488 }
4489 } else {
4490 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4491 assert(CAT && "unexpected type for array initializer");
4492 ConstLength = CAT->getSize();
4493 }
4494 if (Length) {
4495 auto *LengthVal = Builder.CreateIntCast(
4496 EmitScalarExpr(Length), IntPtrTy,
4497 Length->getType()->hasSignedIntegerRepresentation());
4498 Idx = Builder.CreateSub(
4499 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4500 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4501 } else {
4502 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4503 --ConstLength;
4504 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4505 }
4506 }
4507 }
4508 assert(Idx);
4509
4510 Address EltPtr = Address::invalid();
4511 LValueBaseInfo BaseInfo;
4512 TBAAAccessInfo TBAAInfo;
4513 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4514 // The base must be a pointer, which is not an aggregate. Emit
4515 // it. It needs to be emitted first in case it's what captures
4516 // the VLA bounds.
4517 Address Base =
4518 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4519 BaseTy, VLA->getElementType(), IsLowerBound);
4520 // The element count here is the total number of non-VLA elements.
4521 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4522
4523 // Effectively, the multiply by the VLA size is part of the GEP.
4524 // GEP indexes are signed, and scaling an index isn't permitted to
4525 // signed-overflow, so we use the same semantics for our explicit
4526 // multiply. We suppress this if overflow is not undefined behavior.
4527 if (getLangOpts().isSignedOverflowDefined())
4528 Idx = Builder.CreateMul(Idx, NumElements);
4529 else
4530 Idx = Builder.CreateNSWMul(Idx, NumElements);
4531 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4532 !getLangOpts().isSignedOverflowDefined(),
4533 /*signedIndices=*/false, E->getExprLoc());
4534 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4535 // If this is A[i] where A is an array, the frontend will have decayed the
4536 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4537 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4538 // "gep x, i" here. Emit one "gep A, 0, i".
4539 assert(Array->getType()->isArrayType() &&
4540 "Array to pointer decay must have array source type!");
4541 LValue ArrayLV;
4542 // For simple multidimensional array indexing, set the 'accessed' flag for
4543 // better bounds-checking of the base expression.
4544 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4545 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4546 else
4547 ArrayLV = EmitLValue(Array);
4548
4549 // Propagate the alignment from the array itself to the result.
4550 EltPtr = emitArraySubscriptGEP(
4551 *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4552 ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4553 /*signedIndices=*/false, E->getExprLoc());
4554 BaseInfo = ArrayLV.getBaseInfo();
4555 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4556 } else {
4557 Address Base =
4558 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4559 ResultExprTy, IsLowerBound);
4560 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4561 !getLangOpts().isSignedOverflowDefined(),
4562 /*signedIndices=*/false, E->getExprLoc());
4563 }
4564
4565 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4566}
4567
4570 // Emit the base vector as an l-value.
4571 LValue Base;
4572
4573 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4574 if (E->isArrow()) {
4575 // If it is a pointer to a vector, emit the address and form an lvalue with
4576 // it.
4577 LValueBaseInfo BaseInfo;
4578 TBAAAccessInfo TBAAInfo;
4579 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4580 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4581 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4582 Base.getQuals().removeObjCGCAttr();
4583 } else if (E->getBase()->isGLValue()) {
4584    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4585 // emit the base as an lvalue.
4586 assert(E->getBase()->getType()->isVectorType());
4587 Base = EmitLValue(E->getBase());
4588 } else {
4589 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4590 assert(E->getBase()->getType()->isVectorType() &&
4591 "Result must be a vector");
4592 llvm::Value *Vec = EmitScalarExpr(E->getBase());
4593
4594 // Store the vector to memory (because LValue wants an address).
4595 Address VecMem = CreateMemTemp(E->getBase()->getType());
4596 Builder.CreateStore(Vec, VecMem);
4597 Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4599 }
4600
4601 QualType type =
4602 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4603
4604 // Encode the element access list into a vector of unsigned indices.
4606 E->getEncodedElementAccess(Indices);
4607
4608 if (Base.isSimple()) {
4609 llvm::Constant *CV =
4610 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4611 return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
4612 Base.getBaseInfo(), TBAAAccessInfo());
4613 }
4614 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4615
4616 llvm::Constant *BaseElts = Base.getExtVectorElts();
4618
4619 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4620 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4621 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4622 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4623 Base.getBaseInfo(), TBAAAccessInfo());
4624}
4625
4626LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4627  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4628    EmitIgnoredExpr(E->getBase());
4629 return EmitDeclRefLValue(DRE);
4630 }
4631
4632 Expr *BaseExpr = E->getBase();
4633 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4634 LValue BaseLV;
4635 if (E->isArrow()) {
4636 LValueBaseInfo BaseInfo;
4637 TBAAAccessInfo TBAAInfo;
4638 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4639 QualType PtrTy = BaseExpr->getType()->getPointeeType();
4640 SanitizerSet SkippedChecks;
4641 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4642 if (IsBaseCXXThis)
4643 SkippedChecks.set(SanitizerKind::Alignment, true);
4644 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4645 SkippedChecks.set(SanitizerKind::Null, true);
4646 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4647 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4648 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4649 } else
4650 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4651
4652 NamedDecl *ND = E->getMemberDecl();
4653 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4654 LValue LV = EmitLValueForField(BaseLV, Field);
4656 if (getLangOpts().OpenMP) {
4657 // If the member was explicitly marked as nontemporal, mark it as
4658 // nontemporal. If the base lvalue is marked as nontemporal, mark access
4659 // to children as nontemporal too.
4660 if ((IsWrappedCXXThis(BaseExpr) &&
4662 BaseLV.isNontemporal())
4663 LV.setNontemporal(/*Value=*/true);
4664 }
4665 return LV;
4666 }
4667
4668 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4669 return EmitFunctionDeclLValue(*this, E, FD);
4670
4671 llvm_unreachable("Unhandled member declaration!");
4672}
4673
4674/// Given that we are currently emitting a lambda, emit an l-value for
4675/// one of its members.
4676///
4677LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
4678 llvm::Value *ThisValue) {
4679 bool HasExplicitObjectParameter = false;
4680 if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
4681 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4682 assert(MD->getParent()->isLambda());
4683 assert(MD->getParent() == Field->getParent());
4684 }
4685 LValue LambdaLV;
4686 if (HasExplicitObjectParameter) {
4687 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4688 auto It = LocalDeclMap.find(D);
4689 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4690 Address AddrOfExplicitObject = It->getSecond();
4691 if (D->getType()->isReferenceType())
4692 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4694 else
4695 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
4697 } else {
4698 QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4699 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4700 }
4701 return EmitLValueForField(LambdaLV, Field);
4702}
4703
4704LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4705 return EmitLValueForLambdaField(Field, CXXABIThisValue);
4706}
4707
4708/// Get the field index in the debug info. The debug info structure/union
4709/// will ignore the unnamed bitfields.
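/// For example (illustrative), in
/// \code
/// struct S { int a; int : 3; int b; };
/// \endcode
/// 'b' has field index 2 in the AST but index 1 in the debug info, because the
/// unnamed bit-field is skipped.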
4710unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4711                                             unsigned FieldIndex) {
4712 unsigned I = 0, Skipped = 0;
4713
4714 for (auto *F : Rec->getDefinition()->fields()) {
4715 if (I == FieldIndex)
4716 break;
4717 if (F->isUnnamedBitField())
4718 Skipped++;
4719 I++;
4720 }
4721
4722 return FieldIndex - Skipped;
4723}
4724
4725/// Get the address of a zero-sized field within a record. The resulting
4726/// address doesn't necessarily have the right type.
4727static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4728                                       const FieldDecl *Field) {
4729  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4730 CGF.getContext().getFieldOffset(Field));
4731 if (Offset.isZero())
4732 return Base;
4733 Base = Base.withElementType(CGF.Int8Ty);
4734 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4735}
4736
4737/// Drill down to the storage of a field without walking into
4738/// reference types.
4739///
4740/// The resulting address doesn't necessarily have the right type.
4742 const FieldDecl *field) {
4743 if (field->isZeroSize(CGF.getContext()))
4744 return emitAddrOfZeroSizeField(CGF, base, field);
4745
4746 const RecordDecl *rec = field->getParent();
4747
4748 unsigned idx =
4749 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4750
4751 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4752}
4753
4755 Address addr, const FieldDecl *field) {
4756 const RecordDecl *rec = field->getParent();
4757 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4758 base.getType(), rec->getLocation());
4759
4760 unsigned idx =
4761 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4762
4764 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4765}
4766
4767static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4768 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4769 if (!RD)
4770 return false;
4771
4772 if (RD->isDynamicClass())
4773 return true;
4774
4775 for (const auto &Base : RD->bases())
4776 if (hasAnyVptr(Base.getType(), Context))
4777 return true;
4778
4779 for (const FieldDecl *Field : RD->fields())
4780 if (hasAnyVptr(Field->getType(), Context))
4781 return true;
4782
4783 return false;
4784}
4785
4786LValue CodeGenFunction::EmitLValueForField(LValue base,
4787 const FieldDecl *field) {
4788 LValueBaseInfo BaseInfo = base.getBaseInfo();
4789
4790 if (field->isBitField()) {
4791 const CGRecordLayout &RL =
4793 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4794 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4795 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4796 Info.VolatileStorageSize != 0 &&
4797 field->getType()
4800 Address Addr = base.getAddress(*this);
4801 unsigned Idx = RL.getLLVMFieldNo(field);
4802 const RecordDecl *rec = field->getParent();
4804 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4805 if (!UseVolatile) {
4806 if (!IsInPreservedAIRegion &&
4807 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4808 if (Idx != 0)
4809 // For structs, we GEP to the field that the record layout suggests.
4810 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4811 } else {
4812 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4813 getContext().getRecordType(rec), rec->getLocation());
4815 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4816 DbgInfo);
4817 }
4818 }
4819 const unsigned SS =
4820 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4821 // Get the access type.
4822 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4823 Addr = Addr.withElementType(FieldIntTy);
4824 if (UseVolatile) {
4825 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4826 if (VolatileOffset)
4827 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4828 }
4829
4830 QualType fieldType =
4831 field->getType().withCVRQualifiers(base.getVRQualifiers());
4832 // TODO: Support TBAA for bit fields.
4833 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4834 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4835 TBAAAccessInfo());
4836 }
4837
4838 // Fields of may-alias structures are may-alias themselves.
4839 // FIXME: this should get propagated down through anonymous structs
4840 // and unions.
4841 QualType FieldType = field->getType();
4842 const RecordDecl *rec = field->getParent();
4843 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4844 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4845 TBAAAccessInfo FieldTBAAInfo;
4846 if (base.getTBAAInfo().isMayAlias() ||
4847 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4848 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4849 } else if (rec->isUnion()) {
4850 // TODO: Support TBAA for unions.
4851 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4852 } else {
4853 // If no base type has been assigned for the base access, then try to
4854 // generate one for this base lvalue.
4855 FieldTBAAInfo = base.getTBAAInfo();
4856 if (!FieldTBAAInfo.BaseType) {
4857 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4858 assert(!FieldTBAAInfo.Offset &&
4859 "Nonzero offset for an access with no base type!");
4860 }
4861
4862 // Adjust offset to be relative to the base type.
4863 const ASTRecordLayout &Layout =
4865 unsigned CharWidth = getContext().getCharWidth();
4866 if (FieldTBAAInfo.BaseType)
4867 FieldTBAAInfo.Offset +=
4868 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4869
4870 // Update the final access type and size.
4871 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4872 FieldTBAAInfo.Size =
4874 }
4875
4876 Address addr = base.getAddress(*this);
4878 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4879 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4880 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4881 ClassDef->isDynamicClass()) {
4882 // Getting to any field of a dynamic object requires stripping the dynamic
4883 // information provided by invariant.group, because accessing fields may
4884 // leak the real address of the dynamic object, which could result in a
4885 // miscompilation if the leaked pointer were ever compared.
4886 auto *stripped =
4888 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
4889 }
4890 }
4891
4892 unsigned RecordCVR = base.getVRQualifiers();
4893 if (rec->isUnion()) {
4894 // For unions, there is no pointer adjustment.
4895 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4896 hasAnyVptr(FieldType, getContext()))
4897 // Because unions can easily skip invariant.barriers, we need to add
4898 // a barrier every time a CXXRecord field with a vptr is referenced.
4900
4902 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4903 // Remember the original union field index
4904 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
4905 rec->getLocation());
4906 addr =
4908 addr.emitRawPointer(*this),
4909 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
4910 addr.getElementType(), addr.getAlignment());
4911 }
4912
4913 if (FieldType->isReferenceType())
4914 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4915 } else {
4916 if (!IsInPreservedAIRegion &&
4917 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
4918 // For structs, we GEP to the field that the record layout suggests.
4919 addr = emitAddrOfFieldStorage(*this, addr, field);
4920 else
4921 // Remember the original struct field index
4922 addr = emitPreserveStructAccess(*this, base, addr, field);
4923 }
4924
4925 // If this is a reference field, load the reference right now.
4926 if (FieldType->isReferenceType()) {
4927 LValue RefLVal =
4928 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4929 if (RecordCVR & Qualifiers::Volatile)
4930 RefLVal.getQuals().addVolatile();
4931 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4932
4933 // Qualifiers on the struct don't apply to the referencee.
4934 RecordCVR = 0;
4935 FieldType = FieldType->getPointeeType();
4936 }
4937
4938 // Make sure that the address is pointing to the right type. This is critical
4939 // for both unions and structs.
4940 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4941
4942 if (field->hasAttr<AnnotateAttr>())
4943 addr = EmitFieldAnnotations(field, addr);
4944
4945 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4946 LV.getQuals().addCVRQualifiers(RecordCVR);
4947
4948 // __weak attribute on a field is ignored.
4951
4952 return LV;
4953}
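A rough source-level illustration of the reference-field handling above (a sketch only; the exact IR depends on TBAA and optimization settings):

    struct R {
      int &ref;
      int val;
    };
    int use(R &r) {
      return r.ref    // reference member: the stored pointer is loaded first, then dereferenced
           + r.val;   // ordinary member: a struct GEP at the index the record layout supplies
    }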
4954
4955LValue
4957 const FieldDecl *Field) {
4958 QualType FieldType = Field->getType();
4959
4960 if (!FieldType->isReferenceType())
4961 return EmitLValueForField(Base, Field);
4962
4963 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
4964
4965 // Make sure that the address is pointing to the right type.
4966 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4967 V = V.withElementType(llvmType);
4968
4969 // TODO: Generate TBAA information that describes this access as a structure
4970 // member access and not just an access to an object of the field's type. This
4971 // should be similar to what we do in EmitLValueForField().
4972 LValueBaseInfo BaseInfo = Base.getBaseInfo();
4973 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4974 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4975 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4976 CGM.getTBAAInfoForSubobject(Base, FieldType));
4977}
4978
4980 if (E->isFileScope()) {
4982 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
4983 }
4984 if (E->getType()->isVariablyModifiedType())
4985 // make sure to emit the VLA size.
4987
4988 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
4989 const Expr *InitExpr = E->getInitializer();
4991
4992 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
4993 /*Init*/ true);
4994
4995 // Block-scope compound literals are destroyed at the end of the enclosing
4996 // scope in C.
4997 if (!getLangOpts().CPlusPlus)
5000 E->getType(), getDestroyer(DtorKind),
5001 DtorKind & EHCleanup);
5002
5003 return Result;
5004}
5005
5007 if (!E->isGLValue())
5008 // Initializing an aggregate temporary in C++11: T{...}.
5009 return EmitAggExprToLValue(E);
5010
5011 // An lvalue initializer list must be initializing a reference.
5012 assert(E->isTransparent() && "non-transparent glvalue init list");
5013 return EmitLValue(E->getInit(0));
5014}
5015
5016/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5017/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5018/// LValue is returned and the current block has been terminated.
5019static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5020 const Expr *Operand) {
5021 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5022 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5023 return std::nullopt;
5024 }
5025
5026 return CGF.EmitLValue(Operand);
5027}
5028
5029namespace {
5030// Handle the case where the condition constant-folds to a simple integer,
5031// which means we don't have to emit separate true/false blocks.
5032std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5034 const Expr *condExpr = E->getCond();
5035 bool CondExprBool;
5036 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5037 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5038 if (!CondExprBool)
5039 std::swap(Live, Dead);
5040
5041 if (!CGF.ContainsLabel(Dead)) {
5042 // If the true case is live, we need to track its region.
5043 if (CondExprBool)
5045 // If the live operand is a throw expression, emit it and return an
5046 // undefined lvalue, because the result can't be used.
5047 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5048 CGF.EmitCXXThrowExpr(ThrowExpr);
5049 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5050 llvm::Type *Ty = CGF.UnqualPtrTy;
5051 return CGF.MakeAddrLValue(
5052 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5053 Dead->getType());
5054 }
5055 return CGF.EmitLValue(Live);
5056 }
5057 }
5058 return std::nullopt;
5059}
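The simple case above fires when the condition folds to a constant, so only the live arm is emitted; for example (illustrative only):

    int a = 0, b = 0;
    int &pick() {
      return sizeof(int) == 4 ? a : b;  // the condition constant-folds, so just the
                                        // live operand is emitted as an lvalue
    }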
5060struct ConditionalInfo {
5061 llvm::BasicBlock *lhsBlock, *rhsBlock;
5062 std::optional<LValue> LHS, RHS;
5063};
5064
5065// Create and generate the 3 blocks for a conditional operator.
5066// Leaves the 'current block' in the continuation basic block.
5067template<typename FuncTy>
5068ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5070 const FuncTy &BranchGenFunc) {
5071 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5072 CGF.createBasicBlock("cond.false"), std::nullopt,
5073 std::nullopt};
5074 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5075
5076 CodeGenFunction::ConditionalEvaluation eval(CGF);
5077 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5078 CGF.getProfileCount(E));
5079
5080 // Any temporaries created here are conditional.
5081 CGF.EmitBlock(Info.lhsBlock);
5083 eval.begin(CGF);
5084 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5085 eval.end(CGF);
5086 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5087
5088 if (Info.LHS)
5089 CGF.Builder.CreateBr(endBlock);
5090
5091 // Any temporaries created here are conditional.
5092 CGF.EmitBlock(Info.rhsBlock);
5093 eval.begin(CGF);
5094 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5095 eval.end(CGF);
5096 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5097 CGF.EmitBlock(endBlock);
5098
5099 return Info;
5100}
5101} // namespace
5102
5104 const AbstractConditionalOperator *E) {
5105 if (!E->isGLValue()) {
5106 // ?: here should be an aggregate.
5107 assert(hasAggregateEvaluationKind(E->getType()) &&
5108 "Unexpected conditional operator!");
5109 return (void)EmitAggExprToLValue(E);
5110 }
5111
5112 OpaqueValueMapping binding(*this, E);
5113 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5114 return;
5115
5116 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5117 CGF.EmitIgnoredExpr(E);
5118 return LValue{};
5119 });
5120}
5123 if (!expr->isGLValue()) {
5124 // ?: here should be an aggregate.
5125 assert(hasAggregateEvaluationKind(expr->getType()) &&
5126 "Unexpected conditional operator!");
5127 return EmitAggExprToLValue(expr);
5128 }
5129
5130 OpaqueValueMapping binding(*this, expr);
5131 if (std::optional<LValue> Res =
5132 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5133 return *Res;
5134
5135 ConditionalInfo Info = EmitConditionalBlocks(
5136 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5137 return EmitLValueOrThrowExpression(CGF, E);
5138 });
5139
5140 if ((Info.LHS && !Info.LHS->isSimple()) ||
5141 (Info.RHS && !Info.RHS->isSimple()))
5142 return EmitUnsupportedLValue(expr, "conditional operator");
5143
5144 if (Info.LHS && Info.RHS) {
5145 Address lhsAddr = Info.LHS->getAddress(*this);
5146 Address rhsAddr = Info.RHS->getAddress(*this);
5148 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5149 Builder.GetInsertBlock(), expr->getType());
5150 AlignmentSource alignSource =
5151 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5152 Info.RHS->getBaseInfo().getAlignmentSource());
5154 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5155 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5156 TBAAInfo);
5157 } else {
5158 assert((Info.LHS || Info.RHS) &&
5159 "both operands of glvalue conditional are throw-expressions?");
5160 return Info.LHS ? *Info.LHS : *Info.RHS;
5161 }
5162}
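When both arms are ordinary glvalues, the addresses computed in cond.true and cond.false are joined in the continuation block (effectively a phi of the two pointers) and the merged lvalue is returned. A sketch of a source pattern that reaches this path:

    int a, b;
    void assign(bool c) {
      (c ? a : b) = 42;  // both arms are lvalues; the store goes through the merged address
    }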
5163
5164/// EmitCastLValue - Casts are never lvalues unless the cast is to a reference
5165/// type. If the cast is to a reference, we can have the usual lvalue result;
5166/// otherwise, if a cast is needed by the code generator in an lvalue context,
5167/// it must mean that we need the address of an aggregate in order to access
5168/// one of its members. This can happen for all the reasons that casts are
5169/// permitted with an aggregate result, including no-op aggregate casts and
5170/// casts from scalar to union.
5172 switch (E->getCastKind()) {
5173 case CK_ToVoid:
5174 case CK_BitCast:
5175 case CK_LValueToRValueBitCast:
5176 case CK_ArrayToPointerDecay:
5177 case CK_FunctionToPointerDecay:
5178 case CK_NullToMemberPointer:
5179 case CK_NullToPointer:
5180 case CK_IntegralToPointer:
5181 case CK_PointerToIntegral:
5182 case CK_PointerToBoolean:
5183 case CK_IntegralCast:
5184 case CK_BooleanToSignedIntegral:
5185 case CK_IntegralToBoolean:
5186 case CK_IntegralToFloating:
5187 case CK_FloatingToIntegral:
5188 case CK_FloatingToBoolean:
5189 case CK_FloatingCast:
5190 case CK_FloatingRealToComplex:
5191 case CK_FloatingComplexToReal:
5192 case CK_FloatingComplexToBoolean:
5193 case CK_FloatingComplexCast:
5194 case CK_FloatingComplexToIntegralComplex:
5195 case CK_IntegralRealToComplex:
5196 case CK_IntegralComplexToReal:
5197 case CK_IntegralComplexToBoolean:
5198 case CK_IntegralComplexCast:
5199 case CK_IntegralComplexToFloatingComplex:
5200 case CK_DerivedToBaseMemberPointer:
5201 case CK_BaseToDerivedMemberPointer:
5202 case CK_MemberPointerToBoolean:
5203 case CK_ReinterpretMemberPointer:
5204 case CK_AnyPointerToBlockPointerCast:
5205 case CK_ARCProduceObject:
5206 case CK_ARCConsumeObject:
5207 case CK_ARCReclaimReturnedObject:
5208 case CK_ARCExtendBlockObject:
5209 case CK_CopyAndAutoreleaseBlockObject:
5210 case CK_IntToOCLSampler:
5211 case CK_FloatingToFixedPoint:
5212 case CK_FixedPointToFloating:
5213 case CK_FixedPointCast:
5214 case CK_FixedPointToBoolean:
5215 case CK_FixedPointToIntegral:
5216 case CK_IntegralToFixedPoint:
5217 case CK_MatrixCast:
5218 case CK_HLSLVectorTruncation:
5219 case CK_HLSLArrayRValue:
5220 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5221
5222 case CK_Dependent:
5223 llvm_unreachable("dependent cast kind in IR gen!");
5224
5225 case CK_BuiltinFnToFnPtr:
5226 llvm_unreachable("builtin functions are handled elsewhere");
5227
5228 // These are never l-values; just use the aggregate emission code.
5229 case CK_NonAtomicToAtomic:
5230 case CK_AtomicToNonAtomic:
5231 return EmitAggExprToLValue(E);
5232
5233 case CK_Dynamic: {
5234 LValue LV = EmitLValue(E->getSubExpr());
5235 Address V = LV.getAddress(*this);
5236 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5238 }
5239
5240 case CK_ConstructorConversion:
5241 case CK_UserDefinedConversion:
5242 case CK_CPointerToObjCPointerCast:
5243 case CK_BlockPointerToObjCPointerCast:
5244 case CK_LValueToRValue:
5245 return EmitLValue(E->getSubExpr());
5246
5247 case CK_NoOp: {
5248 // CK_NoOp can model a qualification conversion, which can remove an array
5249 // bound and change the IR type.
5250 // FIXME: Once pointee types are removed from IR, remove this.
5251 LValue LV = EmitLValue(E->getSubExpr());
5252 // Propagate the volatile qualifier to the LValue, if it exists in E.
5254 LV.getQuals() = E->getType().getQualifiers();
5255 if (LV.isSimple()) {
5256 Address V = LV.getAddress(*this);
5257 if (V.isValid()) {
5258 llvm::Type *T = ConvertTypeForMem(E->getType());
5259 if (V.getElementType() != T)
5260 LV.setAddress(V.withElementType(T));
5261 }
5262 }
5263 return LV;
5264 }
5265
5266 case CK_UncheckedDerivedToBase:
5267 case CK_DerivedToBase: {
5268 const auto *DerivedClassTy =
5269 E->getSubExpr()->getType()->castAs<RecordType>();
5270 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5271
5272 LValue LV = EmitLValue(E->getSubExpr());
5273 Address This = LV.getAddress(*this);
5274
5275 // Perform the derived-to-base conversion
5277 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5278 /*NullCheckValue=*/false, E->getExprLoc());
5279
5280 // TODO: Support accesses to members of base classes in TBAA. For now, we
5281 // conservatively pretend that the complete object is of the base class
5282 // type.
5283 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5285 }
5286 case CK_ToUnion:
5287 return EmitAggExprToLValue(E);
5288 case CK_BaseToDerived: {
5289 const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5290 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5291
5292 LValue LV = EmitLValue(E->getSubExpr());
5293
5294 // Perform the base-to-derived conversion
5296 LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
5297 /*NullCheckValue=*/false);
5298
5299 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5300 // performed and the object is not of the derived type.
5303 E->getType());
5304
5305 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5306 EmitVTablePtrCheckForCast(E->getType(), Derived,
5307 /*MayBeNull=*/false, CFITCK_DerivedCast,
5308 E->getBeginLoc());
5309
5310 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5312 }
5313 case CK_LValueBitCast: {
5314 // This must be a reinterpret_cast (or C-style equivalent).
5315 const auto *CE = cast<ExplicitCastExpr>(E);
5316
5317 CGM.EmitExplicitCastExprType(CE, this);
5318 LValue LV = EmitLValue(E->getSubExpr());
5319 Address V = LV.getAddress(*this).withElementType(
5320 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5321
5322 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5324 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5325 E->getBeginLoc());
5326
5327 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5329 }
5330 case CK_AddressSpaceConversion: {
5331 LValue LV = EmitLValue(E->getSubExpr());
5332 QualType DestTy = getContext().getPointerType(E->getType());
5333 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5334 *this, LV.getPointer(*this),
5336 E->getType().getAddressSpace(), ConvertType(DestTy));
5338 LV.getAddress(*this).getAlignment()),
5339 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5340 }
5341 case CK_ObjCObjectLValueCast: {
5342 LValue LV = EmitLValue(E->getSubExpr());
5344 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5346 }
5347 case CK_ZeroToOCLOpaqueType:
5348 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5349
5350 case CK_VectorSplat: {
5351 // LValue results of vector splats are only supported in HLSL.
5352 if (!getLangOpts().HLSL)
5353 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5354 return EmitLValue(E->getSubExpr());
5355 }
5356 }
5357
5358 llvm_unreachable("Unhandled lvalue cast kind?");
5359}
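For instance, a derived-to-base conversion in an lvalue context goes through the derived-to-base cases above (an illustrative example):

    struct Base { int x; };
    struct Derived : Base { int y; };
    int &baseX(Derived &d) {
      return static_cast<Base &>(d).x;  // the cast stays an lvalue; the address is adjusted
                                        // to the Base subobject before the member access
    }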
5360
5364}
5365
5366LValue
5369
5370 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5371 it = OpaqueLValues.find(e);
5372
5373 if (it != OpaqueLValues.end())
5374 return it->second;
5375
5376 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5377 return EmitLValue(e->getSourceExpr());
5378}
5379
5380RValue
5383
5384 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5385 it = OpaqueRValues.find(e);
5386
5387 if (it != OpaqueRValues.end())
5388 return it->second;
5389
5390 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5391 return EmitAnyExpr(e->getSourceExpr());
5392}
5393
5395 const FieldDecl *FD,
5397 QualType FT = FD->getType();
5398 LValue FieldLV = EmitLValueForField(LV, FD);
5399 switch (getEvaluationKind(FT)) {
5400 case TEK_Complex:
5401 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5402 case TEK_Aggregate:
5403 return FieldLV.asAggregateRValue(*this);
5404 case TEK_Scalar:
5405 // This routine is used to load fields one-by-one to perform a copy, so
5406 // don't load reference fields.
5407 if (FD->getType()->isReferenceType())
5408 return RValue::get(FieldLV.getPointer(*this));
5409 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5410 // primitive load.
5411 if (FieldLV.isBitField())
5412 return EmitLoadOfLValue(FieldLV, Loc);
5413 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5414 }
5415 llvm_unreachable("bad evaluation kind");
5416}
5417
5418//===--------------------------------------------------------------------===//
5419// Expression Emission
5420//===--------------------------------------------------------------------===//
5421
5423 ReturnValueSlot ReturnValue) {
5424 // Builtins never have block type.
5425 if (E->getCallee()->getType()->isBlockPointerType())
5426 return EmitBlockCallExpr(E, ReturnValue);
5427
5428 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5430
5431 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5433
5434 // A CXXOperatorCallExpr is created even for explicit object methods, but
5435 // these should be treated like static function calls.
5436 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5437 if (const auto *MD =
5438 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5439 MD && MD->isImplicitObjectMemberFunction())
5441
5442 CGCallee callee = EmitCallee(E->getCallee());
5443
5444 if (callee.isBuiltin()) {
5445 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5446 E, ReturnValue);
5447 }
5448
5449 if (callee.isPseudoDestructor()) {
5451 }
5452
5453 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
5454}
5455
5456/// Emit a CallExpr without considering whether it might be a subclass.
5458 ReturnValueSlot ReturnValue) {
5460 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
5461}
5462
5463// Detect the unusual situation where an inline version is shadowed by a
5464// non-inline version. In that case we should pick the external one
5465// everywhere. That matches GCC's behavior.
5467 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5468 if (!PD->isInlineBuiltinDeclaration())
5469 return false;
5470 return true;
5471}
5472
5474 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5475
5476 if (auto builtinID = FD->getBuiltinID()) {
5477 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5478 std::string NoBuiltins = "no-builtins";
5479
5480 StringRef Ident = CGF.CGM.getMangledName(GD);
5481 std::string FDInlineName = (Ident + ".inline").str();
5482
5483 bool IsPredefinedLibFunction =
5485 bool HasAttributeNoBuiltin =
5486 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5487 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5488
5489 // When directly calling an inline builtin, call it through its mangled
5490 // name to make it clear it's not the actual builtin.
5491 if (CGF.CurFn->getName() != FDInlineName &&
5493 llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
5494 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5495 llvm::Module *M = Fn->getParent();
5496 llvm::Function *Clone = M->getFunction(FDInlineName);
5497 if (!Clone) {
5498 Clone = llvm::Function::Create(Fn->getFunctionType(),
5499 llvm::GlobalValue::InternalLinkage,
5500 Fn->getAddressSpace(), FDInlineName, M);
5501 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5502 }
5503 return CGCallee::forDirect(Clone, GD);
5504 }
5505
5506 // Replaceable builtins provide their own implementation of a builtin. If we
5507 // are in an inline builtin implementation, avoid trivial infinite
5508 // recursion. Honor __attribute__((no_builtin("foo"))) or
5509 // __attribute__((no_builtin)) on the current function, unless foo is not a
5510 // predefined library function, in which case we must generate the builtin
5511 // no matter what.
5512 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5513 return CGCallee::forBuiltin(builtinID, FD);
5514 }
5515
5516 llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
5517 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5518 FD->hasAttr<CUDAGlobalAttr>())
5519 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5520 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5521
5522 return CGCallee::forDirect(CalleePtr, GD);
5523}
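A typical source pattern that reaches the clone logic above is a fortify-style inline builtin definition (a sketch modeled on such headers; the prototype and target assumptions are illustrative):

    extern "C" inline __attribute__((always_inline, gnu_inline)) void *
    memcpy(void *dst, const void *src, unsigned long n) {
      return __builtin_memcpy(dst, src, n);  // inline builtin definition
    }
    // If every declaration of memcpy looks like this one, OnlyHasInlineBuiltinDeclaration()
    // is true and direct calls are redirected to a "memcpy.inline" clone so the body
    // above actually gets inlined.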
5524
5526 E = E->IgnoreParens();
5527
5528 // Look through function-to-pointer decay.
5529 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5530 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5531 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5532 return EmitCallee(ICE->getSubExpr());
5533 }
5534
5535 // Resolve direct calls.
5536 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5537 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5538 return EmitDirectCallee(*this, FD);
5539 }
5540 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5541 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5542 EmitIgnoredExpr(ME->getBase());
5543 return EmitDirectCallee(*this, FD);
5544 }
5545
5546 // Look through template substitutions.
5547 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5548 return EmitCallee(NTTP->getReplacement());
5549
5550 // Treat pseudo-destructor calls differently.
5551 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5553 }
5554
5555 // Otherwise, we have an indirect reference.
5556 llvm::Value *calleePtr;
5558 if (auto ptrType = E->getType()->getAs<PointerType>()) {
5559 calleePtr = EmitScalarExpr(E);
5560 functionType = ptrType->getPointeeType();
5561 } else {
5562 functionType = E->getType();
5563 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5564 }
5565 assert(functionType->isFunctionType());
5566
5567 GlobalDecl GD;
5568 if (const auto *VD =
5569 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5570 GD = GlobalDecl(VD);
5571
5572 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5573 CGCallee callee(calleeInfo, calleePtr);
5574 return callee;
5575}
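EmitCallee distinguishes, among other forms, a direct reference to a function from a call through a pointer value; for example (illustrative):

    void f(int);
    void caller(void (*fp)(int)) {
      f(1);   // the callee decays from a DeclRefExpr naming 'f': emitted as a direct callee
      fp(2);  // the callee is a pointer-typed value: its scalar value becomes an indirect
              // callee carrying the pointee function type
    }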
5576
5578 // Comma expressions just emit their LHS then their RHS as an l-value.
5579 if (E->getOpcode() == BO_Comma) {
5580 EmitIgnoredExpr(E->getLHS());
5582 return EmitLValue(E->getRHS());
5583 }
5584
5585 if (E->getOpcode() == BO_PtrMemD ||
5586 E->getOpcode() == BO_PtrMemI)
5588
5589 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5590
5591 // Note that in all of these cases, __block variables need the RHS
5592 // evaluated first just in case the variable gets moved by the RHS.
5593
5594 switch (getEvaluationKind(E->getType())) {
5595 case TEK_Scalar: {
5596 switch (E->getLHS()->getType().getObjCLifetime()) {
5598 return EmitARCStoreStrong(E, /*ignored*/ false).first;
5599
5601 return EmitARCStoreAutoreleasing(E).first;
5602
5603 // No reason to do any of these differently.
5607 break;
5608 }
5609
5610 // TODO: Can we de-duplicate this code with the corresponding code in
5611 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5612 RValue RV;
5613 llvm::Value *Previous = nullptr;
5614 QualType SrcType = E->getRHS()->getType();
5615 // Check if the LHS is a bitfield; if the RHS contains an implicit cast
5616 // expression, we want to extract that value and potentially (if the bitfield
5617 // sanitizer is enabled) use it to check for an implicit conversion.
5618 if (E->getLHS()->refersToBitField()) {
5619 llvm::Value *RHS =
5621 RV = RValue::get(RHS);
5622 } else
5623 RV = EmitAnyExpr(E->getRHS());
5624
5626
5627 if (RV.isScalar())
5629
5630 if (LV.isBitField()) {
5631 llvm::Value *Result = nullptr;
5632 // If bitfield sanitizers are enabled we want to use the result
5633 // to check whether a truncation or sign change has occurred.
5634 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5636 else
5638
5639 // If the expression contained an implicit conversion, make sure
5640 // to use the value before the scalar conversion.
5641 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5642 QualType DstType = E->getLHS()->getType();
5643 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5644 LV.getBitFieldInfo(), E->getExprLoc());
5645 } else
5646 EmitStoreThroughLValue(RV, LV);
5647
5648 if (getLangOpts().OpenMP)
5650 E->getLHS());
5651 return LV;
5652 }
5653
5654 case TEK_Complex:
5656
5657 case TEK_Aggregate:
5658 return EmitAggExprToLValue(E);
5659 }
5660 llvm_unreachable("bad evaluation kind");
5661}
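In C++, an assignment is itself an lvalue and a comma expression yields its right operand as an lvalue, which is what the BO_Assign and BO_Comma paths above handle when such expressions appear in lvalue positions; for example:

    int a, b, c;
    void g() {
      (a = b) = c;  // the LHS '(a = b)' is emitted through the BO_Assign path, and the
                    // resulting lvalue (a) is then stored to again
      (a, b) = c;   // the LHS '(a, b)' takes the BO_Comma path: 'a' is emitted for its
                    // side effects and 'b' is the lvalue that receives c
    }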
5662
5664 RValue RV = EmitCallExpr(E);
5665
5666 if (!RV.isScalar())
5667 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5669
5670 assert(E->getCallReturnType(getContext())->isReferenceType() &&
5671 "Can't have a scalar return unless the return type is a "
5672 "reference type!");
5673
5675}
5676
5678 // FIXME: This shouldn't require another copy.
5679 return EmitAggExprToLValue(E);
5680}
5681
5684 && "binding l-value to type which needs a temporary");
5685 AggValueSlot Slot = CreateAggTemp(E->getType());
5686 EmitCXXConstructExpr(E, Slot);
5688}
5689
5690LValue
5693}
5694
5698}
5699
5703}
5704
5705LValue
5707 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5709 EmitAggExpr(E->getSubExpr(), Slot);
5710 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5712}
5713
5716
5717 if (!RV.isScalar())
5718 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5720
5721 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5722 "Can't have a scalar return unless the return type is a "
5723 "reference type!");
5724
5726}
5727
5729 Address V =
5732}
5733
5735 const ObjCIvarDecl *Ivar) {
5736 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5737}
5738
5739llvm::Value *
5741 const ObjCIvarDecl *Ivar) {
5742 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5743 QualType PointerDiffType = getContext().getPointerDiffType();
5744 return Builder.CreateZExtOrTrunc(OffsetValue,
5745 getTypes().ConvertType(PointerDiffType));
5746}
5747
5749 llvm::Value *BaseValue,
5750 const ObjCIvarDecl *Ivar,
5751 unsigned CVRQualifiers) {
5752 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5753 Ivar, CVRQualifiers);
5754}
5755
5757 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5758 llvm::Value *BaseValue = nullptr;
5759 const Expr *BaseExpr = E->getBase();
5760 Qualifiers BaseQuals;
5761 QualType ObjectTy;
5762 if (E->isArrow()) {
5763 BaseValue = EmitScalarExpr(BaseExpr);
5764 ObjectTy = BaseExpr->getType()->getPointeeType();
5765 BaseQuals = ObjectTy.getQualifiers();
5766 } else {
5767 LValue BaseLV = EmitLValue(BaseExpr);
5768 BaseValue = BaseLV.getPointer(*this);
5769 ObjectTy = BaseExpr->getType();
5770 BaseQuals = ObjectTy.getQualifiers();
5771 }
5772
5773 LValue LV =
5774 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5775 BaseQuals.getCVRQualifiers());
5777 return LV;
5778}
5779
5781 // We can only get an l-value for a message expression returning an aggregate type.
5782 RValue RV = EmitAnyExprToTemp(E);
5783 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5785}
5786
5787RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
5788 const CallExpr *E, ReturnValueSlot ReturnValue,
5789 llvm::Value *Chain) {
5790 // Get the actual function type. The callee type will always be a pointer to
5791 // function type or a block pointer type.
5792 assert(CalleeType->isFunctionPointerType() &&
5793 "Call must have function pointer type!");
5794
5795 const Decl *TargetDecl =
5796 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5797
5798 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5799 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5800 "trying to emit a call to an immediate function");
5801
5802 CalleeType = getContext().getCanonicalType(CalleeType);
5803
5804 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5805
5806 CGCallee Callee = OrigCallee;
5807
5808 if (SanOpts.has(SanitizerKind::Function) &&
5809 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
5810 !isa<FunctionNoProtoType>(PointeeType)) {
5811 if (llvm::Constant *PrefixSig =
5813 SanitizerScope SanScope(this);
5814 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
5815
5816 llvm::Type *PrefixSigType = PrefixSig->getType();
5817 llvm::StructType *PrefixStructTy = llvm::StructType::get(
5818 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
5819
5820 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5821
5822 // On 32-bit Arm, the low bit of a function pointer indicates whether
5823 // it's using the Arm or Thumb instruction set. The actual first
5824 // instruction lives at the same address either way, so we must clear
5825 // that low bit before using the function address to find the prefix
5826 // structure.
5827 //
5828 // This applies to both Arm and Thumb target triples, because
5829 // either one could be used in an interworking context where it
5830 // might be passed function pointers of both types.
5831 llvm::Value *AlignedCalleePtr;
5832 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
5833 llvm::Value *CalleeAddress =
5834 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
5835 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
5836 llvm::Value *AlignedCalleeAddress =
5837 Builder.CreateAnd(CalleeAddress, Mask);
5838 AlignedCalleePtr =
5839 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
5840 } else {
5841 AlignedCalleePtr = CalleePtr;
5842 }
5843
5844 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
5845 llvm::Value *CalleeSigPtr =
5846 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
5847 llvm::Value *CalleeSig =
5848 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
5849 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
5850
5851 llvm::BasicBlock *Cont = createBasicBlock("cont");
5852 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
5853 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
5854
5855 EmitBlock(TypeCheck);
5856 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
5857 Int32Ty,
5858 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
5859 getPointerAlign());
5860 llvm::Value *CalleeTypeHashMatch =
5861 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
5862 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
5863 EmitCheckTypeDescriptor(CalleeType)};
5864 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
5865 SanitizerHandler::FunctionTypeMismatch, StaticData,
5866 {CalleePtr});
5867
5868 Builder.CreateBr(Cont);
5869 EmitBlock(Cont);
5870 }
5871 }
5872
5873 const auto *FnType = cast<FunctionType>(PointeeType);
5874
5875 // If we are checking indirect calls and this call is indirect, check that the
5876 // function pointer is a member of the bit set for the function type.
5877 if (SanOpts.has(SanitizerKind::CFIICall) &&
5878 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5879 SanitizerScope SanScope(this);
5880 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
5881
5882 llvm::Metadata *MD;
5883 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
5885 else
5887
5888 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
5889
5890 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5891 llvm::Value *TypeTest = Builder.CreateCall(
5892 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
5893
5894 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
5895 llvm::Constant *StaticData[] = {
5896 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
5899 };
5900 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
5901 EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
5902 CalleePtr, StaticData);
5903 } else {
5904 EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
5905 SanitizerHandler::CFICheckFail, StaticData,
5906 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
5907 }
5908 }
5909
5910 CallArgList Args;
5911 if (Chain)
5912 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
5913
5914 // C++17 requires that we evaluate arguments to a call using assignment syntax
5915 // right-to-left, and that we evaluate arguments to certain other operators
5916 // left-to-right. Note that we allow this to override the order dictated by
5917 // the calling convention on the MS ABI, which means that parameter
5918 // destruction order is not necessarily reverse construction order.
5919 // FIXME: Revisit this based on C++ committee response to unimplementability.
5921 bool StaticOperator = false;
5922 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
5923 if (OCE->isAssignmentOp())
5925 else {
5926 switch (OCE->getOperator()) {
5927 case OO_LessLess:
5928 case OO_GreaterGreater:
5929 case OO_AmpAmp:
5930 case OO_PipePipe:
5931 case OO_Comma:
5932 case OO_ArrowStar:
5934 break;
5935 default:
5936 break;
5937 }
5938 }
5939
5940 if (const auto *MD =
5941 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
5942 MD && MD->isStatic())
5943 StaticOperator = true;
5944 }
5945
5946 auto Arguments = E->arguments();
5947 if (StaticOperator) {
5948 // If we're calling a static operator, we need to emit the object argument
5949 // and ignore it.
5950 EmitIgnoredExpr(E->getArg(0));
5951 Arguments = drop_begin(Arguments, 1);
5952 }
5953 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
5954 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
5955
5957 Args, FnType, /*ChainCall=*/Chain);
5958
5959 // C99 6.5.2.2p6:
5960 // If the expression that denotes the called function has a type
5961 // that does not include a prototype, [the default argument
5962 // promotions are performed]. If the number of arguments does not
5963 // equal the number of parameters, the behavior is undefined. If
5964 // the function is defined with a type that includes a prototype,
5965 // and either the prototype ends with an ellipsis (, ...) or the
5966 // types of the arguments after promotion are not compatible with
5967 // the types of the parameters, the behavior is undefined. If the
5968 // function is defined with a type that does not include a
5969 // prototype, and the types of the arguments after promotion are
5970 // not compatible with those of the parameters after promotion,
5971 // the behavior is undefined [except in some trivial cases].
5972 // That is, in the general case, we should assume that a call
5973 // through an unprototyped function type works like a *non-variadic*
5974 // call. The way we make this work is to cast to the exact type
5975 // of the promoted arguments.
5976 //
5977 // Chain calls use this same code path to add the invisible chain parameter
5978 // to the function type.
5979 if (isa<FunctionNoProtoType>(FnType) || Chain) {
5980 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
5981 int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
5982 CalleeTy = CalleeTy->getPointerTo(AS);
5983
5984 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5985 CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
5986 Callee.setFunctionPointer(CalleePtr);
5987 }
5988
5989 // A HIP function pointer contains a kernel handle when it is used in a triple
5990 // chevron. The kernel stub needs to be loaded from the kernel handle and used
5991 // as the callee.
5992 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
5993 isa<CUDAKernelCallExpr>(E) &&
5994 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5995 llvm::Value *Handle = Callee.getFunctionPointer();
5996 auto *Stub = Builder.CreateLoad(
5997 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
5998 Callee.setFunctionPointer(Stub);
5999 }
6000 llvm::CallBase *CallOrInvoke = nullptr;
6001 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
6002 E == MustTailCall, E->getExprLoc());
6003
6004 // Generate the function declaration's DISubprogram so that it can be used
6005 // in debug info about call sites.
6006 if (CGDebugInfo *DI = getDebugInfo()) {
6007 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6008 FunctionArgList Args;
6009 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6010 DI->EmitFuncDeclForCallSite(CallOrInvoke,
6011 DI->getFunctionType(CalleeDecl, ResTy, Args),
6012 CalleeDecl);
6013 }
6014 }
6015
6016 return Call;
6017}
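The evaluation-order adjustment above matters for overloaded operators: under C++17 the operands of an overloaded assignment are evaluated right-to-left, and the operands of operators such as << are evaluated left-to-right, regardless of the calling convention's usual argument order. An illustrative example:

    struct Sink {
      Sink &operator=(int);
      Sink &operator<<(int);
    };
    int first();
    int second();
    void h(Sink &s) {
      s = first();              // C++17: the right operand is sequenced before the left
      s << first() << second(); // C++17: strictly left-to-right
    }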
6018
6021 Address BaseAddr = Address::invalid();
6022 if (E->getOpcode() == BO_PtrMemI) {
6023 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6024 } else {
6025 BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
6026 }
6027
6028 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6029 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6030
6031 LValueBaseInfo BaseInfo;
6032 TBAAAccessInfo TBAAInfo;
6033 Address MemberAddr =
6034 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6035 &TBAAInfo);
6036
6037 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6038}
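The two pointer-to-member forms handled above correspond to the .* and ->* operators (illustrative):

    struct P { int m; };
    int read(P &obj, P *ptr, int P::*pm) {
      return obj.*pm    // BO_PtrMemD: the base lvalue's address plus the member offset
           + ptr->*pm;  // BO_PtrMemI: the base pointer (with alignment) plus the same offset
    }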
6039
6040/// Given the address of a temporary variable, produce an r-value of
6041/// its type.
6043 QualType type,
6044 SourceLocation loc) {
6046 switch (getEvaluationKind(type)) {
6047 case TEK_Complex:
6048 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6049 case TEK_Aggregate:
6050 return lvalue.asAggregateRValue(*this);
6051 case TEK_Scalar:
6052 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6053 }
6054 llvm_unreachable("bad evaluation kind");
6055}
6056
6057void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6058 assert(Val->getType()->isFPOrFPVectorTy());
6059 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6060 return;
6061
6062 llvm::MDBuilder MDHelper(getLLVMContext());
6063 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6064
6065 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6066}
6067
6068void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6069 llvm::Type *EltTy = Val->getType()->getScalarType();
6070 if (!EltTy->isFloatTy())
6071 return;
6072
6073 if ((getLangOpts().OpenCL &&
6074 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6075 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6076 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6077 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
6078 //
6079 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6080 // build option allows an application to specify that single precision
6081 // floating-point divide (x/y and 1/x) and sqrt used in the program
6082 // source are correctly rounded.
6083 //
6084 // TODO: CUDA has a prec-sqrt flag
6085 SetFPAccuracy(Val, 3.0f);
6086 }
6087}
6088
6089void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6090 llvm::Type *EltTy = Val->getType()->getScalarType();
6091 if (!EltTy->isFloatTy())
6092 return;
6093
6094 if ((getLangOpts().OpenCL &&
6095 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6096 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6097 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6098 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6099 //
6100 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6101 // build option allows an application to specify that single precision
6102 // floating-point divide (x/y and 1/x) and sqrt used in the program
6103 // source are correctly rounded.
6104 //
6105 // TODO: CUDA has a prec-div flag
6106 SetFPAccuracy(Val, 2.5f);
6107 }
6108}
6109
6110namespace {
6111 struct LValueOrRValue {
6112 LValue LV;
6113 RValue RV;
6114 };
6115}
6116
6117static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6118 const PseudoObjectExpr *E,
6119 bool forLValue,
6120 AggValueSlot slot) {
6122
6123 // Find the result expression, if any.
6124 const Expr *resultExpr = E->getResultExpr();
6125 LValueOrRValue result;
6126
6128 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6129 const Expr *semantic = *i;
6130
6131 // If this semantic expression is an opaque value, bind it
6132 // to the result of its source expression.
6133 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6134 // Skip unique OVEs.
6135 if (ov->isUnique()) {
6136 assert(ov != resultExpr &&
6137 "A unique OVE cannot be used as the result expression");
6138 continue;
6139 }
6140
6141 // If this is the result expression, we may need to evaluate
6142 // directly into the slot.
6143 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6144 OVMA opaqueData;
6145 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6147 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6148 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6150 opaqueData = OVMA::bind(CGF, ov, LV);
6151 result.RV = slot.asRValue();
6152
6153 // Otherwise, emit as normal.
6154 } else {
6155 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6156
6157 // If this is the result, also evaluate the result now.
6158 if (ov == resultExpr) {
6159 if (forLValue)
6160 result.LV = CGF.EmitLValue(ov);
6161 else
6162 result.RV = CGF.EmitAnyExpr(ov, slot);
6163 }
6164 }
6165
6166 opaques.push_back(opaqueData);
6167
6168 // Otherwise, if the expression is the result, evaluate it
6169 // and remember the result.
6170 } else if (semantic == resultExpr) {
6171 if (forLValue)
6172 result.LV = CGF.EmitLValue(semantic);
6173 else
6174 result.RV = CGF.EmitAnyExpr(semantic, slot);
6175
6176 // Otherwise, evaluate the expression in an ignored context.
6177 } else {
6178 CGF.EmitIgnoredExpr(semantic);
6179 }
6180 }
6181
6182 // Unbind all the opaques now.
6183 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6184 opaques[i].unbind(CGF);
6185
6186 return result;
6187}
6188
6190 AggValueSlot slot) {
6191 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6192}
6193
6195 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6196}
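One C++ construct that produces a PseudoObjectExpr is the Microsoft property extension, available with Microsoft extensions enabled; a hypothetical sketch:

    struct Widget {
      int getCount() const;
      void setCount(int);
      __declspec(property(get = getCount, put = setCount)) int count;
    };
    void bump(Widget &w) {
      w.count = w.count + 1;  // the read lowers to getCount() and the write to setCount();
                              // the semantic form is what emitPseudoObjectExpr walks above
    }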
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3285
DynTypedNode Node
Defines enum values for all the target-independent builtin functions.
CodeGenFunction::ComplexPairTy ComplexPairTy
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition: CGExpr.cpp:2643
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition: CGExpr.cpp:2886
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition: CGExpr.cpp:3914
static llvm::cl::opt< bool > ClSanitizeGuardChecks("ubsan-guard-checks", llvm::cl::Optional, llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."))
static bool hasBooleanRepresentation(QualType Ty)
Definition: CGExpr.cpp:1847
static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind)
Definition: CGExpr.cpp:3441
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition: CGExpr.cpp:3984
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field)
Drill down to the storage of a field without walking into reference types.
Definition: CGExpr.cpp:4741
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type? This is different from pr...
Definition: CGExpr.cpp:1701
@ CEK_AsReferenceOnly
Definition: CGExpr.cpp:1703
@ CEK_AsValueOnly
Definition: CGExpr.cpp:1705
@ CEK_None
Definition: CGExpr.cpp:1702
@ CEK_AsValueOrReference
Definition: CGExpr.cpp:1704
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition: CGExpr.cpp:1676
static llvm::Value * emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low, llvm::Value *High)
Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
Definition: CGExpr.cpp:654
static QualType getFixedSizeElementType(const ASTContext &ctx, const VariableArrayType *vla)
Definition: CGExpr.cpp:3975
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition: CGExpr.cpp:2874
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition: CGExpr.cpp:5019
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition: CGExpr.cpp:3928
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition: CGExpr.cpp:2865
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition: CGExpr.cpp:2052
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition: CGExpr.cpp:6117
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition: CGExpr.cpp:4000
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, const MemberExpr *ME)
Definition: CGExpr.cpp:1811
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition: CGExpr.cpp:926
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2156
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition: CGExpr.cpp:1707
static QualType getConstantExprReferredType(const FullExpr *E, const ASTContext &Ctx)
Definition: CGExpr.cpp:1510
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition: CGExpr.cpp:1860
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition: CGExpr.cpp:4129
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition: CGExpr.cpp:5473
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *FD, RecIndicesTy &Indices)
Definition: CGExpr.cpp:1075
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition: CGExpr.cpp:2740
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition: CGExpr.cpp:5466
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition: CGExpr.cpp:2814
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition: CGExpr.cpp:4767
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition: CGExpr.cpp:4098
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition: CGExpr.cpp:4013
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition: CGExpr.cpp:2754
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD)
Determine whether we can emit a reference to VD from the current context, despite not necessarily hav...
Definition: CGExpr.cpp:2911
static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize)
Definition: CGExpr.cpp:3960
static llvm::cl::opt< bool > ClSanitizeDebugDeoptimization("ubsan-unique-traps", llvm::cl::Optional, llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."))
static RawAddress createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner, RawAddress *Alloca=nullptr)
Definition: CGExpr.cpp:400
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Definition: CGExpr.cpp:447
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2077
static llvm::Constant * EmitFunctionDeclPointer(CodeGenModule &CGM, GlobalDecl GD)
Definition: CGExpr.cpp:2853
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1255
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition: CGExpr.cpp:4754
const SanitizerHandlerInfo SanitizerHandlers[]
Definition: CGExpr.cpp:3458
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field)
Get the address of a zero-sized field within a record.
Definition: CGExpr.cpp:4727
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB)
Definition: CGExpr.cpp:3464
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition: CGExpr.cpp:4366
static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, Address ReferenceTemporary)
Definition: CGExpr.cpp:284
StringRef Filename
Definition: Format.cpp:2975
llvm::MachO::Record Record
Definition: MachO.h:31
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:755
Defines the SourceManager interface.
StateNode * Previous
const LValueBase getLValueBase() const
Definition: APValue.cpp:974
bool isLValue() const
Definition: APValue.h:406
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
SourceManager & getSourceManager()
Definition: ASTContext.h:705
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2575
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1118
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:646
const LangOptions & getLangOpts() const
Definition: ASTContext.h:775
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
Definition: ASTContext.h:1092
const NoSanitizeList & getNoSanitizeList() const
Definition: ASTContext.h:785
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2341
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
Definition: ASTContext.h:1091
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2771
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2345
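As a rough illustration of how these ASTContext size and alignment queries relate (a sketch only; Ctx is assumed to be an ASTContext and T a complete QualType):
  // Bit-sized and CharUnits-sized views of the same type; the two agree
  // once converted through toCharUnitsFromBits().
  uint64_t SizeInBits    = Ctx.getTypeSize(T);
  CharUnits SizeInChars  = Ctx.getTypeSizeInChars(T);
  CharUnits AlignInChars = Ctx.getTypeAlignInChars(T);
  assert(Ctx.toCharUnitsFromBits(SizeInBits) == SizeInChars);
  (void)AlignInChars;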
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4141
Expr * getCond() const
getCond - Return the expression representing the condition for the ?: operator.
Definition: Expr.h:4319
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition: Expr.h:4325
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition: Expr.h:4331
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6734
Expr * getBase()
Get base of the array section.
Definition: Expr.h:6800
Expr * getLength()
Get length of array section.
Definition: Expr.h:6810
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition: Expr.cpp:5068
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:6839
Expr * getLowerBound()
Get lower bound of array section.
Definition: Expr.h:6804
bool isOpenACCArraySection() const
Definition: Expr.h:6797
SourceLocation getColonLocFirst() const
Definition: Expr.h:6831
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2664
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:2719
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition: Expr.h:2693
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3518
QualType getElementType() const
Definition: Type.h:3530
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3840
Expr * getLHS() const
Definition: Expr.h:3889
SourceLocation getExprLoc() const
Definition: Expr.h:3880
Expr * getRHS() const
Definition: Expr.h:3891
Opcode getOpcode() const
Definition: Expr.h:3884
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition: Builtins.h:160
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1487
CXXTemporary * getTemporary()
Definition: ExprCXX.h:1505
const Expr * getSubExpr() const
Definition: ExprCXX.h:1509
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1542
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2799
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1367
bool isDynamicClass() const
Definition: DeclCXX.h:585
bool hasDefinition() const
Definition: DeclCXX.h:571
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1062
MSGuidDecl * getGuidDecl() const
Definition: ExprCXX.h:1108
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2820
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition: Expr.h:3011
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1638
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition: Expr.h:2990
Expr * getCallee()
Definition: Expr.h:2970
arg_range arguments()
Definition: Expr.h:3059
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition: Expr.cpp:1590
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3483
path_iterator path_begin()
Definition: Expr.h:3553
CastKind getCastKind() const
Definition: Expr.h:3527
bool changesVolatileQualification() const
Return whether this cast changes the volatile qualification of its operand.
Definition: Expr.h:3612
path_iterator path_end()
Definition: Expr.h:3554
Expr * getSubExpr()
Definition: Expr.h:3533
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt. Beware: llvm::MaybeAlign as...
Definition: CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
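A minimal sketch of the CharUnits arithmetic used by the address-emission code above (the concrete quantities are made up for the example):
  // CharUnits is an integer quantity of "chars" (bytes on 8-bit-char targets).
  CharUnits Offset = CharUnits::fromQuantity(12);
  CharUnits Align  = CharUnits::fromQuantity(16);
  CharUnits AtOff  = Align.alignmentAtOffset(Offset); // 4: best provable alignment at Offset
  llvm::Align A    = Align.getAsAlign();              // llvm::Align(16)
  int64_t Raw      = Offset.getQuantity();            // 12
  (void)AtOff; (void)A; (void)Raw;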
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeRecover
Set of sanitizer checks that are non-fatal (i.e.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
llvm::Value * getBasePointer() const
Definition: Address.h:170
static Address invalid()
Definition: Address.h:153
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
CharUnits getAlignment() const
Definition: Address.h:166
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:226
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:203
Address setKnownNonNull()
Definition: Address.h:208
void setAlignment(CharUnits Value)
Definition: Address.h:168
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:158
bool isValid() const
Definition: Address.h:154
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:176
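A hedged sketch of how Address values are typically re-wrapped without changing what they point at (Addr, NewPtr and CGF are assumed to exist in a CodeGen context):
  // Same pointer/alignment, different element type for subsequent GEPs:
  Address ByteView = Addr.withElementType(CGF.Int8Ty);
  // Same element type/alignment, different (e.g. laundered) pointer value:
  Address Fresh = Addr.withPointer(NewPtr, Addr.isKnownNonNull());
  CharUnits Align  = Addr.getAlignment();
  llvm::Value *Raw = Addr.emitRawPointer(CGF); // authenticates/offsets if needed
  (void)ByteView; (void)Fresh; (void)Align; (void)Raw;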
An aggregate value slot.
Definition: CGValue.h:512
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:580
Address getAddress() const
Definition: CGValue.h:652
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:621
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
RValue asRValue() const
Definition: CGValue.h:674
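A sketch of the two usual ways a slot is formed before emitting an aggregate expression (DestAddr, Ty and E are placeholders for this example):
  // Result is not needed:
  AggValueSlot Discard = AggValueSlot::ignored();
  // Evaluate E directly into DestAddr of type Ty:
  AggValueSlot Dest = AggValueSlot::forAddr(
      DestAddr, Ty.getQualifiers(), AggValueSlot::IsNotDestructed,
      AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
      AggValueSlot::DoesNotOverlap);
  CGF.EmitAggExpr(E, Dest);
  (void)Discard;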
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:824
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:203
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:331
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:412
Address CreateLaunderInvariantGroup(Address Addr)
Definition: CGBuilder.h:436
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:428
Address CreateStripInvariantGroup(Address Addr)
Definition: CGBuilder.h:442
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:189
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:261
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
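A sketch of common CGBuilderTy patterns for field access (Addr is assumed to point at a struct whose LLVM field 1 is of interest):
  // Alignment-preserving struct GEP plus a load through it:
  Address FieldAddr = CGF.Builder.CreateStructGEP(Addr, 1, "field");
  llvm::Value *FieldVal = CGF.Builder.CreateLoad(FieldAddr, "field.load");
  // Byte-offset view: CreateConstInBoundsByteGEP expects an i8 element type.
  Address Byte = CGF.Builder.CreateConstInBoundsByteGEP(
      Addr.withElementType(CGF.Int8Ty), CharUnits::fromQuantity(4));
  (void)FieldVal; (void)Byte;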
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
Abstract information about a function or function prototype.
Definition: CGCall.h:40
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:58
All available information about a concrete callee.
Definition: CGCall.h:62
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:172
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition: CGCall.h:164
bool isPseudoDestructor() const
Definition: CGCall.h:161
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition: CGCall.h:115
unsigned getBuiltinID() const
Definition: CGCall.h:156
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:129
bool isBuiltin() const
Definition: CGCall.h:149
const FunctionDecl * getBuiltinDecl() const
Definition: CGCall.h:152
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition: CGCall.h:123
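A sketch of forming a callee for a known function (GD and FD are assumed to be the GlobalDecl and FunctionDecl being called):
  llvm::Constant *FnPtr = CGM.GetAddrOfFunction(GD);
  CGCallee Callee = CGCallee::forDirect(FnPtr, CGCalleeInfo(GD));
  // Builtins take a different path so the call emitter can expand them inline:
  if (unsigned BuiltinID = FD->getBuiltinID())
    Callee = CGCallee::forBuiltin(BuiltinID, FD);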
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, llvm::Value *ivarOffset)=0
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)=0
virtual llvm::Value * EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)=0
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj)=0
virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel)=0
Get the address of a selector for the specified name and type values.
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, bool threadlocal=false)=0
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
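A sketch of mapping an AST field to its LLVM struct index (RD, FD and Base are placeholders):
  const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
  unsigned LLVMIdx = RL.getLLVMFieldNo(FD);
  Address FieldAddr = CGF.Builder.CreateStructGEP(Base, LLVMIdx, FD->getName());
  (void)FieldAddr;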
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:258
void add(RValue rvalue, QualType type)
Definition: CGCall.h:282
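For illustration, arguments are accumulated as (RValue, QualType) pairs before the call is arranged (ScalarV and PtrV are placeholder llvm::Value pointers):
  CallArgList Args;
  Args.add(RValue::get(ScalarV), CGF.getContext().IntTy);
  Args.add(RValue::get(PtrV), CGF.getContext().VoidPtrTy);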
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check for an [implicit] conversion of a bitfield.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
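A sketch of the usual lvalue-then-load/store pattern for a scalar expression E (CGF assumed):
  LValue LV = CGF.EmitLValue(E);
  RValue RV = CGF.EmitLoadOfLValue(LV, E->getExprLoc());
  llvm::Value *V = RV.getScalarVal();          // assumes E has scalar evaluation kind
  // The symmetric store entry point:
  CGF.EmitStoreThroughLValue(RValue::get(V), LV);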
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitARCLoadWeakRetained(Address addr)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Address EmitExtVectorElementLValue(LValue V)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
LValue EmitCallExprLValue(const CallExpr *E)
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type,...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ Default
No language constraints on evaluation order.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
llvm::Value * getTypeSize(QualType Ty)
Returns the calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitCountedByFieldExpr(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
LValue EmitCoyieldLValue(const CoyieldExpr *E)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will always be accessible even if...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
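A hedged sketch of how a sanitizer check is typically assembled around EmitCheck (Cond, Ptr, Loc and Ty are placeholders; the handler and mask shown are examples, not a prescription):
  CodeGenFunction::SanitizerScope SanScope(&CGF); // EmitCheck expects a sanitizer scope
  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(Ty)};
  CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::Null),
                SanitizerHandler::TypeMismatch, StaticArgs,
                CGF.EmitCheckValue(Ptr));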
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
emitBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
const TargetCodeGenInfo & getTargetHooks() const
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
LValue EmitMemberExpr(const MemberExpr *E)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
LValue EmitStringLiteralLValue(const StringLiteral *E)
static Destroyer destroyARCStrongPrecise
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
const FieldDecl * FindCountedByField(const FieldDecl *FD)
Find the FieldDecl specified in a FAM's "counted_by" attribute.
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
static Destroyer destroyARCStrongImprecise
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot())
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
This class organizes the cross-function state that is used while generating LLVM code.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD)
Get the address of a GUID.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if the type of an expression is a variably modified type.
Definition: CGExpr.cpp:1241
void setDSOLocal(llvm::GlobalValue *GV) const
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CGDebugInfo * getModuleDebugInfo()
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E)
Returns a pointer to a constant global variable for the given file-scope compound literal expression.
llvm::ConstantInt * CreateCrossDsoCfiTypeId(llvm::Metadata *MD)
Generate a cross-DSO type identifier for MD.
void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C)
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition: CGCXX.cpp:220
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1133
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
llvm::Metadata * CreateMetadataIdentifierForType(QualType T)
Create a metadata identifier for the given type.
llvm::Constant * getTypeDescriptorFromMap(QualType Ty)
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
llvm::MDNode * getTBAABaseTypeInfo(QualType QTy)
getTBAABaseTypeInfo - Get metadata that describes the given base access type.
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Metadata * CreateMetadataIdentifierGeneralized(QualType T)
Create a metadata identifier for the generalization of the given type.
const llvm::Triple & getTriple() const
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:245
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ConstantAddress GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name=".str")
Return a pointer to a constant array for the given string literal.
ASTContext & getContext() const
ConstantAddress GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO)
Get the address of a template parameter object.
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
llvm::MDNode * getTBAATypeInfo(QualType QTy)
getTBAATypeInfo - Get metadata used to describe accesses to objects of the given type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, TBAAAccessInfo InfoB)
mergeTBAAInfoForConditionalOperator - Get merged TBAA information for the purposes of conditional ope...
llvm::LLVMContext & getLLVMContext()
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of a constant literal, which is used to emit the constant literal as globa...
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info, llvm::Function *F, bool IsThunk)
Set the LLVM function attributes (sext, zext, etc).
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F)
Set the LLVM function attributes which only apply to a function definition.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
ConstantAddress GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *)
Return a pointer to a constant array for the given ObjCEncodeExpr node.
ConstantAddress GetAddrOfConstantCString(const std::string &Str, const char *GlobalName=nullptr)
Returns a pointer to a character array containing the literal and a terminating '\0' character.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given CGFunctionInfo.
Definition: CGCall.cpp:1632
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:680
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenTypes.h:104
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:639
llvm::Type * ConvertTypeForMem(QualType T, bool ForBitField=false)
ConvertTypeForMem - Convert type T into a llvm::Type.
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:260
ConstantAddress withElementType(llvm::Type *ElemTy) const
Definition: Address.h:276
llvm::Constant * getPointer() const
Definition: Address.h:272
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:352
void mergeForCast(const LValueBaseInfo &Info)
Definition: CGValue.h:173
AlignmentSource getAlignmentSource() const
Definition: CGValue.h:170
LValue - This represents an lvalue reference.
Definition: CGValue.h:181
bool isBitField() const
Definition: CGValue.h:283
bool isMatrixElt() const
Definition: CGValue.h:286
Expr * getBaseIvarExp() const
Definition: CGValue.h:335
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:417
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition: CGValue.h:486
void setObjCIvar(bool Value)
Definition: CGValue.h:301
bool isObjCArray() const
Definition: CGValue.h:303
bool isObjCStrong() const
Definition: CGValue.h:327
bool isGlobalObjCRef() const
Definition: CGValue.h:309
bool isVectorElt() const
Definition: CGValue.h:282
void setObjCArray(bool Value)
Definition: CGValue.h:304
bool isSimple() const
Definition: CGValue.h:281
bool isVolatileQualified() const
Definition: CGValue.h:288
llvm::Value * getMatrixIdx() const
Definition: CGValue.h:403
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:370
llvm::Value * getGlobalReg() const
Definition: CGValue.h:438
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:440
bool isVolatile() const
Definition: CGValue.h:331
const Qualifiers & getQuals() const
Definition: CGValue.h:341
bool isGlobalReg() const
Definition: CGValue.h:285
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:460
bool isObjCWeak() const
Definition: CGValue.h:324
unsigned getVRQualifiers() const
Definition: CGValue.h:290
void setThreadLocalRef(bool Value)
Definition: CGValue.h:313
LValue setKnownNonNull()
Definition: CGValue.h:355
bool isNonGC() const
Definition: CGValue.h:306
void setGlobalObjCRef(bool Value)
Definition: CGValue.h:310
bool isExtVectorElt() const
Definition: CGValue.h:284
llvm::Value * getVectorIdx() const
Definition: CGValue.h:390
void setNontemporal(bool Value)
Definition: CGValue.h:322
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:349
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:361
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition: CGValue.h:318
QualType getType() const
Definition: CGValue.h:294
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:432
bool isThreadLocalRef() const
Definition: CGValue.h:312
KnownNonNull_t isKnownNonNull() const
Definition: CGValue.h:352
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:338
void setNonGC(bool Value)
Definition: CGValue.h:307
Address getVectorAddress() const
Definition: CGValue.h:378
bool isNontemporal() const
Definition: CGValue.h:321
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:476
bool isObjCIvar() const
Definition: CGValue.h:300
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:450
void setAddress(Address address)
Definition: CGValue.h:375
void setBaseIvarExp(Expr *V)
Definition: CGValue.h:336
RValue asAggregateRValue(CodeGenFunction &CGF) const
Definition: CGValue.h:506
Address getExtVectorAddress() const
Definition: CGValue.h:409
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:496
Address getMatrixAddress() const
Definition: CGValue.h:395
Address getBitFieldAddress() const
Definition: CGValue.h:423
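A sketch of forming and querying a simple lvalue (Addr, T and CGF assumed); CodeGenFunction::MakeAddrLValue, listed above, is the usual shortcut:
  LValue LV = CGF.MakeAddrLValue(Addr, T);   // simple lvalue with natural base/TBAA info
  assert(LV.isSimple() && !LV.isBitField());
  llvm::Value *P = LV.getPointer(CGF);
  QualType Ty = LV.getType();
  (void)P; (void)Ty;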
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:41
bool isScalar() const
Definition: CGValue.h:63
static RValue get(llvm::Value *V)
Definition: CGValue.h:97
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:124
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:107
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition: CGValue.h:82
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:70
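The three RValue flavors in one sketch (V, RealV, ImagV and AggAddr are placeholders):
  RValue Scalar  = RValue::get(V);                    // single llvm::Value
  RValue Complex = RValue::getComplex(RealV, ImagV);  // (real, imag) pair
  RValue Agg     = RValue::getAggregate(AggAddr);     // address of an in-memory aggregate
  llvm::Value *Loaded = Scalar.getScalarVal();
  (void)Complex; (void)Agg; (void)Loaded;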
An abstract representation of an aligned address.
Definition: Address.h:41
RawAddress withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:99
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:76
llvm::Value * getPointer() const
Definition: Address.h:65
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:356
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:218
Complex values, per C99 6.2.5p11.
Definition: Type.h:3086
QualType getElementType() const
Definition: Type.h:3096
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3413
bool isFileScope() const
Definition: Expr.h:3440
const Expr * getInitializer() const
Definition: Expr.h:3436
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:195
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1072
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4167
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4185
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition: Type.h:3247
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
Definition: DeclBase.cpp:1964
decl_range decls() const
decls_begin/decls_end - Iterate over the declarations stored in this context.
Definition: DeclBase.h:2322
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition: Expr.h:1458
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition: Expr.cpp:488
ValueDecl * getDecl()
Definition: Expr.h:1328
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition: Expr.h:1452
SourceLocation getLocation() const
Definition: Expr.h:1336
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:579
SourceLocation getLocation() const
Definition: DeclBase.h:445
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:530
DeclContext * getDeclContext()
Definition: DeclBase.h:454
bool hasAttr() const
Definition: DeclBase.h:583
void ConvertArgToString(ArgumentKind Kind, intptr_t Val, StringRef Modifier, StringRef Argument, ArrayRef< ArgumentValue > PrevArgs, SmallVectorImpl< char > &Output, ArrayRef< intptr_t > QualTypeVals) const
Converts a diagnostic argument (as an intptr_t) into the string that represents it.
Definition: Diagnostic.h:880
Represents an enum.
Definition: Decl.h:3867
bool isFixed() const
Returns true if this is an Objective-C, C++11, or Microsoft-style enumeration with a fixed underlying...
Definition: Decl.h:4081
void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const
Calculates the [Min,Max) values the enum can store based on the NumPositiveBits and NumNegativeBits.
Definition: Decl.cpp:4973
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:5575
EnumDecl * getDecl() const
Definition: Type.h:5582
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3730
This represents one expression.
Definition: Expr.h:110
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetime...
Definition: Expr.cpp:82
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the same width)...
Definition: Expr.cpp:3086
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition: Expr.h:437
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3059
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3047
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3055
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known address...
bool isPRValue() const
Definition: Expr.h:278
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy technique...
Decl * getReferencedDeclOfCallee()
Definition: Expr.cpp:1545
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3556
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3039
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic expression...
Definition: Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
bool isFlexibleArrayMemberLike(ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution=false) const
Check whether this array fits the idiom of a flexible array member, depending on the value of -fstrict-flex-arrays...
Definition: Expr.cpp:206
QualType getType() const
Definition: Expr.h:142
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/write barrier.
Definition: Expr.cpp:2970
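The Ignore* helpers listed above are typically used to strip syntactic wrappers before inspecting an expression. A small illustrative sketch; the helper functions themselves are hypothetical, only the Expr methods shown above are from the source:
#include "clang/AST/Expr.h"
using namespace clang;
// Hypothetical helper: IgnoreParenImpCasts() iterates to a fixed point, so a
// single call removes nested ParenExpr and ImplicitCastExpr layers.
static const Expr *stripSyntacticNoise(const Expr *E) {
  return E->IgnoreParenImpCasts();
}
// Hypothetical helper: after stripping, check whether the glvalue may refer
// to a bit-field, which changes how it must be loaded and stored.
static bool refersToBitFieldAfterStripping(const Expr *E) {
  return stripSyntacticNoise(E)->refersToBitField();
}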
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6113
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base expression...
Definition: Expr.cpp:4283
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition: Expr.cpp:4315
const Expr * getBase() const
Definition: Expr.h:6130
Represents a member of a struct/union/class.
Definition: Decl.h:3057
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3148
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::getFieldOffset...
Definition: Decl.cpp:4644
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3270
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a field...
Definition: Decl.cpp:4602
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:1039
const Expr * getSubExpr() const
Definition: Expr.h:1052
Represents a function declaration or definition.
Definition: Decl.h:1971
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3632
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4656
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
Describes a C or C++ initializer list.
Definition: Expr.h:4847
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition: Expr.cpp:2432
const Expr * getInit(unsigned Init) const
Definition: Expr.h:4893
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:467
virtual void mangleCXXRTTI(QualType T, raw_ostream &)=0
unsigned getBlockId(const BlockDecl *BD, bool Local)
Definition: Mangle.h:84
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4710
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition: ExprCXX.h:4735
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4727
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition: ExprCXX.h:4760
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2742
bool isIncomplete() const
Definition: Expr.h:2762
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3172
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition: Expr.h:3255
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why? This is only meaningful if the named member...
Definition: Expr.h:3396
Expr * getBase() const
Definition: Expr.h:3249
bool isArrow() const
Definition: Expr.h:3356
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:3367
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3460
bool isObjCBOOLType(QualType T) const
Returns true if T is a typedef of "BOOL" in Objective-C.
Definition: NSAPI.cpp:477
This represents a decl that may have a name.
Definition: Decl.h:249
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:276
A C++ nested-name-specifier augmented with source location information.
bool containsType(SanitizerMask Mask, StringRef MangledTypeName, StringRef Category=StringRef()) const
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1950
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
ObjCIvarDecl * getDecl()
Definition: ExprObjC.h:579
bool isArrow() const
Definition: ExprObjC.h:587
const Expr * getBase() const
Definition: ExprObjC.h:583
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:945
const ObjCMethodDecl * getMethodDecl() const
Definition: ExprObjC.h:1356
QualType getReturnType() const
Definition: DeclObjC.h:329
Represents a class type in Objective C.
Definition: Type.h:6754
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
Selector getSelector() const
Definition: ExprObjC.h:469
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1168
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1218
bool isUnique() const
Definition: Expr.h:1226
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2130
const Expr * getSubExpr() const
Definition: Expr.h:2145
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3139
QualType getPointeeType() const
Definition: Type.h:3149
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition: Expr.h:1986
StringRef getIdentKindName() const
Definition: Expr.h:2043
PredefinedIdentKind getIdentKind() const
Definition: Expr.h:2021
StringLiteral * getFunctionName()
Definition: Expr.h:2030
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
bool isValid() const
unsigned getLine() const
Return the presumed line number of this location.
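A PresumedLoc is normally obtained from SourceManager::getPresumedLoc (listed further below) and then queried through the accessors shown here. A minimal sketch, assuming a SourceManager and SourceLocation are already in hand; the function name is illustrative:
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
// Illustrative only: prints "file:line:column" for a location, if the
// presumed location can be resolved (it may be invalid, e.g. for built-ins).
static void printPresumedLoc(const SourceManager &SM, SourceLocation Loc) {
  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  if (!PLoc.isValid())
    return;
  llvm::errs() << PLoc.getFilename() << ':' << PLoc.getLine() << ':'
               << PLoc.getColumn() << '\n';
}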
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6305
semantics_iterator semantics_end()
Definition: Expr.h:6377
semantics_iterator semantics_begin()
Definition: Expr.h:6371
const Expr *const * const_semantics_iterator
Definition: Expr.h:6370
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition: Expr.h:6358
A (possibly-)qualified type.
Definition: Type.h:940
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:7443
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:1007
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7485
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7399
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1432
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const int")...
Definition: Type.h:7560
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:7453
QualType withCVRQualifiers(unsigned CVR) const
Definition: Type.h:1174
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1530
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1039
The collection of all-type qualifiers we support.
Definition: Type.h:318
unsigned getCVRQualifiers() const
Definition: Type.h:474
GC getObjCGCAttr() const
Definition: Type.h:505
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:347
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:340
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:336
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:350
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:353
bool hasConst() const
Definition: Type.h:443
void addCVRQualifiers(unsigned mask)
Definition: Type.h:488
void removeObjCGCAttr()
Definition: Type.h:509
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition: Type.h:636
void setAddressSpace(LangAS space)
Definition: Type.h:577
bool hasVolatile() const
Definition: Type.h:453
ObjCLifetime getObjCLifetime() const
Definition: Type.h:531
void addVolatile()
Definition: Type.h:456
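The QualType and Qualifiers entries above are two views of the same qualification data: QualType carries the qualified type, while Qualifiers is the detachable set of CVR/ObjC/address-space bits. A minimal C++ sketch using only the accessors and mutators listed above; the function name is an assumption for this illustration:
#include "clang/AST/Type.h"
using namespace clang;
// Illustrative only: copy the CVR qualifiers of Src onto a fresh qualifier
// set and force 'volatile' on, mirroring the accessor/mutator pairs above.
static Qualifiers copyCVRAndAddVolatile(QualType Src) {
  Qualifiers Quals;
  // getQualifiers() yields the full set applied to the type; getCVRQualifiers()
  // narrows it to the const/volatile/restrict bits.
  Quals.addCVRQualifiers(Src.getQualifiers().getCVRQualifiers());
  if (!Quals.hasVolatile())
    Quals.addVolatile();
  return Quals;
}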
Represents a struct/union/class.
Definition: Decl.h:4168
field_range fields() const
Definition: Decl.h:4374
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4359
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5549
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:204
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4383
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1358
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1773
bool isUnion() const
Definition: Decl.h:3790
Exposes information about the current target.
Definition: TargetInfo.h:218
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1256
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1324
The type-property cache.
Definition: Type.cpp:4368
The base class of the type hierarchy.
Definition: Type.h:1813
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or because...
Definition: Type.cpp:1871
bool isBlockPointerType() const
Definition: Type.h:7620
bool isVoidType() const
Definition: Type.h:7905
bool isBooleanType() const
Definition: Type.h:8033
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying type...
Definition: Type.cpp:2156
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition: Type.cpp:1888
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2135
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition: Type.h:8202
bool isArrayType() const
Definition: Type.h:7678
bool isFunctionPointerType() const
Definition: Type.h:7646
bool isCountAttributedType() const
Definition: Type.cpp:684
bool isArithmeticType() const
Definition: Type.cpp:2270
bool isConstantMatrixType() const
Definition: Type.h:7736
bool isPointerType() const
Definition: Type.h:7612
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:7945
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8193
bool isReferenceType() const
Definition: Type.h:7624
bool isVariableArrayType() const
Definition: Type.h:7690
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:695
bool isExtVectorBoolType() const
Definition: Type.h:7726
bool isAnyComplexType() const
Definition: Type.h:7714
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition: Type.h:8076
bool isAtomicType() const
Definition: Type.h:7757
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2671
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2351
bool isFunctionType() const
Definition: Type.h:7608
bool isObjCObjectPointerType() const
Definition: Type.h:7744
bool isVectorType() const
Definition: Type.h:7718
bool isFloatingType() const
Definition: Type.cpp:2238
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8126
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition: Type.cpp:605
bool isRecordType() const
Definition: Type.h:7706
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1875
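The Type predicates listed above are what expression emission uses to decide which lowering path a value takes. An illustrative classification helper built only from those predicates; the enum and function names are assumptions for this sketch, not CodeGen's actual dispatch:
#include "clang/AST/Type.h"
using namespace clang;
enum class ValueShape { Scalar, Complex, Aggregate, Reference, Other };
// Illustrative only: order matters, e.g. complex types are also arithmetic,
// so they are tested before the scalar case.
static ValueShape classify(QualType T) {
  if (T->isReferenceType())
    return ValueShape::Reference;
  if (T->isAnyComplexType())
    return ValueShape::Complex;
  if (T->isRecordType() || T->isArrayType())
    return ValueShape::Aggregate;
  if (T->isArithmeticType() || T->isPointerType())
    return ValueShape::Scalar;
  return ValueShape::Other;
}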
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2183
SourceLocation getExprLoc() const
Definition: Expr.h:2311
Expr * getSubExpr() const
Definition: Expr.h:2228
Opcode getOpcode() const
Definition: Expr.h:2223
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4667
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:706
QualType getType() const
Definition: Decl.h:717
QualType getType() const
Definition: Value.cpp:234
Represents a variable declaration or definition.
Definition: Decl.h:918
TLSKind getTLSKind() const
Definition: Decl.cpp:2165
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition: Decl.cpp:2363
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition: Decl.h:1171
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition: Decl.h:944
@ TLS_None
Not a TLS variable.
Definition: Decl.h:938
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3747
Represents a GCC generic vector type.
Definition: Type.h:3969
unsigned getNumElements() const
Definition: Type.h:3984
#define INT_MIN
Definition: limits.h:55
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matching...
Definition: CGValue.h:140
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw statement...
Definition: EHScopeStack.h:80
@ ARCImpreciseLifetime
Definition: CGValue.h:135
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition: CGValue.h:158
@ NotKnownNonNull
Definition: Address.h:32
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< FunctionType > functionType
Matches FunctionType nodes.
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1899
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1873
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1393
bool Cast(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1713
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition: Specifiers.h:151
@ SC_Register
Definition: Specifiers.h:254
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition: Specifiers.h:324
@ SD_Thread
Thread storage duration.
Definition: Specifiers.h:327
@ SD_Static
Static storage duration.
Definition: Specifiers.h:328
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition: Specifiers.h:325
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:326
@ SD_Dynamic
Dynamic storage duration.
Definition: Specifiers.h:329
@ Result
The result type of a method or function.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
llvm::hash_code hash_value(const CustomizableOptional< T > &O)
const FunctionProtoType * T
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:86
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
@ Other
Other implicit parameter.
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition: Specifiers.h:174
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expression...
Definition: Specifiers.h:177
unsigned long uint64_t
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the LLVM struct...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the LLVM struct...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
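The Offset/Size/StorageSize/IsSigned fields above fully describe how a bit-field value is decoded from its storage unit. The arithmetic below is an illustrative plain-C++ sketch of that decoding, not the IR-emission path CodeGen actually takes (CodeGen emits equivalent shift/mask instructions, and folds endianness into Offset when it builds the CGBitFieldInfo):
#include <cstdint>
// Illustrative only: extract a bit-field from an already-loaded storage unit.
static int64_t decodeBitField(uint64_t Storage, unsigned Offset, unsigned Size,
                              bool IsSigned) {
  // Shift the field down to bit 0 and mask off everything above it.
  uint64_t Mask = (Size < 64) ? ((uint64_t{1} << Size) - 1) : ~uint64_t{0};
  uint64_t Value = (Storage >> Offset) & Mask;
  // For signed fields, sign-extend from the field's top bit.
  if (IsSigned && Size < 64 && (Value & (uint64_t{1} << (Size - 1))))
    Value |= ~Mask;
  return static_cast<int64_t>(Value);
}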
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
Definition: CodeGenTBAA.h:105
uint64_t Offset
Offset - The byte offset of the final access within the base one.
Definition: CodeGenTBAA.h:109
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
Definition: CodeGenTBAA.h:112
llvm::MDNode * BaseType
BaseType - The base/leading access type.
Definition: CodeGenTBAA.h:101
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition: Expr.h:609
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:168
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
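SanitizerSet is a simple bitmask wrapper; CGExpr.cpp consults it through LangOptions::Sanitize (listed earlier) to decide which UBSan checks to emit. A minimal sketch of the two members shown above; the function name is an assumption, and SanitizerKind::ArrayBounds is used only as one example kind:
#include "clang/Basic/Sanitizers.h"
using namespace clang;
// Illustrative only: toggle one sanitizer kind and query it again.
static bool demoSanitizerSet() {
  SanitizerSet Set;
  Set.set(SanitizerKind::ArrayBounds, true);   // enable a single check kind
  bool Enabled = Set.has(SanitizerKind::ArrayBounds);
  Set.set(SanitizerKind::ArrayBounds, false);  // and disable it again
  return Enabled && !Set.has(SanitizerKind::ArrayBounds);
}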
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition: Expr.h:66