CodeGenFunction.h
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGDebugInfo.h"
18#include "CGLoopInfo.h"
19#include "CGValue.h"
20#include "CodeGenModule.h"
21#include "CodeGenPGO.h"
22#include "EHScopeStack.h"
23#include "VarBypassDetector.h"
24#include "clang/AST/CharUnits.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class LLVMContext;
51class MDNode;
52class SwitchInst;
53class Twine;
54class Value;
55class CanonicalLoopInfo;
56}
57
58namespace clang {
59class ASTContext;
60class CXXDestructorDecl;
61class CXXForRangeStmt;
62class CXXTryStmt;
63class Decl;
64class LabelDecl;
65class FunctionDecl;
66class FunctionProtoType;
67class LabelStmt;
68class ObjCContainerDecl;
69class ObjCInterfaceDecl;
70class ObjCIvarDecl;
71class ObjCMethodDecl;
72class ObjCImplementationDecl;
73class ObjCPropertyImplDecl;
74class TargetInfo;
75class VarDecl;
76class ObjCForCollectionStmt;
77class ObjCAtTryStmt;
78class ObjCAtThrowStmt;
79class ObjCAtSynchronizedStmt;
80class ObjCAutoreleasePoolStmt;
81class OMPUseDevicePtrClause;
82class OMPUseDeviceAddrClause;
83class SVETypeFlags;
84class OMPExecutableDirective;
85
86namespace analyze_os_log {
87class OSLogBufferLayout;
88}
89
90namespace CodeGen {
91class CodeGenTypes;
92class CGCallee;
93class CGFunctionInfo;
94class CGBlockInfo;
95class CGCXXABI;
96class BlockByrefHelpers;
97class BlockByrefInfo;
98class BlockFieldFlags;
99class RegionCodeGenTy;
100class TargetCodeGenInfo;
101struct OMPTaskDataTy;
102struct CGCoroData;
103
104/// The kind of evaluation to perform on values of a particular
105/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
106/// CGExprAgg?
107///
108/// TODO: should vectors maybe be split out into their own thing?
109enum TypeEvaluationKind {
110 TEK_Scalar,
111 TEK_Complex,
112 TEK_Aggregate
113};
114
115#define LIST_SANITIZER_CHECKS \
116 SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
117 SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
118 SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
119 SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
120 SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
121 SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
122 SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
123 SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
124 SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
125 SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
126 SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
127 SANITIZER_CHECK(MissingReturn, missing_return, 0) \
128 SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
129 SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
130 SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
131 SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
132 SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
133 SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
134 SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
135 SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
136 SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
137 SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
138 SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
139 SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
140 SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) \
141 SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)
142
143enum SanitizerHandler {
144#define SANITIZER_CHECK(Enum, Name, Version) Enum,
145 LIST_SANITIZER_CHECKS
146#undef SANITIZER_CHECK
147};
148
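// Illustrative note (not part of the original header): the SanitizerHandler
// enumerators are produced by expanding LIST_SANITIZER_CHECKS with the
// temporary SANITIZER_CHECK macro above, roughly equivalent to writing:
//
//   enum SanitizerHandler {
//     AddOverflow,
//     BuiltinUnreachable,
//     // ...one enumerator per SANITIZER_CHECK entry...
//     BoundsSafety
//   };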
149/// Helper class with most of the code for saving a value for a
150/// conditional expression cleanup.
151struct DominatingLLVMValue {
152 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
153
154 /// Answer whether the given value needs extra work to be saved.
155 static bool needsSaving(llvm::Value *value) {
156 if (!value)
157 return false;
158
159 // If it's not an instruction, we don't need to save.
160 if (!isa<llvm::Instruction>(value)) return false;
161
162 // If it's an instruction in the entry block, we don't need to save.
163 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
164 return (block != &block->getParent()->getEntryBlock());
165 }
166
167 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
168 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
169};
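// Sketch (not part of the original header): how DominatingLLVMValue is
// typically used when a value computed now must be reused at a later cleanup
// emission point; `CGF` and `V` stand for a CodeGenFunction and an
// llvm::Value in scope at the call site.
//
//   DominatingLLVMValue::saved_type Saved = DominatingLLVMValue::save(CGF, V);
//   // ...later, at a point that the original value may not dominate...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);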
170
171/// A partial specialization of DominatingValue for llvm::Values that
172/// might be llvm::Instructions.
173template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
174 typedef T *type;
175 static type restore(CodeGenFunction &CGF, saved_type value) {
176 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
177 }
178};
179
180/// A specialization of DominatingValue for Address.
181template <> struct DominatingValue<Address> {
182 typedef Address type;
183
184 struct saved_type {
185 DominatingLLVMValue::saved_type BasePtr;
186 llvm::Type *ElementType;
187 CharUnits Alignment;
188 DominatingLLVMValue::saved_type Offset;
189 llvm::PointerType *EffectiveType;
190 };
191
192 static bool needsSaving(type value) {
193 if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
194 DominatingLLVMValue::needsSaving(value.getOffset()))
195 return true;
196 return false;
197 }
198 static saved_type save(CodeGenFunction &CGF, type value) {
199 return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
200 value.getElementType(), value.getAlignment(),
201 DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
202 }
203 static type restore(CodeGenFunction &CGF, saved_type value) {
204 return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
205 value.ElementType, value.Alignment, CGPointerAuthInfo(),
206 DominatingLLVMValue::restore(CGF, value.Offset));
207 }
208};
209
210/// A specialization of DominatingValue for RValue.
211template <> struct DominatingValue<RValue> {
212 typedef RValue type;
213 class saved_type {
214 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
215 AggregateAddress, ComplexAddress };
216 union {
217 struct {
218 DominatingLLVMValue::saved_type first, second;
219 } Vals;
220 DominatingValue<Address>::saved_type AggregateAddr;
221 };
222 LLVM_PREFERRED_TYPE(Kind)
223 unsigned K : 3;
224
225 saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
226 : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
227
228 saved_type(DominatingLLVMValue::saved_type Val1,
229 DominatingLLVMValue::saved_type Val2)
230 : Vals{Val1, Val2}, K(ComplexAddress) {}
231
232 saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
233 : AggregateAddr(AggregateAddr), K(K) {}
234
235 public:
236 static bool needsSaving(RValue value);
237 static saved_type save(CodeGenFunction &CGF, RValue value);
238 RValue restore(CodeGenFunction &CGF);
239
240 // implementations in CGCleanup.cpp
241 };
242
243 static bool needsSaving(type value) {
244 return saved_type::needsSaving(value);
245 }
246 static saved_type save(CodeGenFunction &CGF, type value) {
247 return saved_type::save(CGF, value);
248 }
249 static type restore(CodeGenFunction &CGF, saved_type value) {
250 return value.restore(CGF);
251 }
252};
253
254/// CodeGenFunction - This class organizes the per-function state that is used
255/// while generating LLVM code.
256class CodeGenFunction : public CodeGenTypeCache {
257 CodeGenFunction(const CodeGenFunction &) = delete;
258 void operator=(const CodeGenFunction &) = delete;
259
260 friend class CGCXXABI;
261public:
262 /// A jump destination is an abstract label, branching to which may
263 /// require a jump out through normal cleanups.
264 struct JumpDest {
265 JumpDest() : Block(nullptr), Index(0) {}
266 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
267 unsigned Index)
268 : Block(Block), ScopeDepth(Depth), Index(Index) {}
269
270 bool isValid() const { return Block != nullptr; }
271 llvm::BasicBlock *getBlock() const { return Block; }
272 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
273 unsigned getDestIndex() const { return Index; }
274
275 // This should be used cautiously.
277 ScopeDepth = depth;
278 }
279
280 private:
281 llvm::BasicBlock *Block;
282 EHScopeStack::stable_iterator ScopeDepth;
283 unsigned Index;
284 };
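// Sketch (assumption, not from the original header): a JumpDest is normally
// created with getJumpDestInCurrentScope() and branched to with
// EmitBranchThroughCleanup(), both declared later in this class, so that any
// cleanups between the branch and the destination are emitted:
//
//   CodeGenFunction::JumpDest ContDest =
//       CGF.getJumpDestInCurrentScope("loop.cont"); // label name illustrative
//   // ...emit code that may push cleanups...
//   CGF.EmitBranchThroughCleanup(ContDest);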
285
286 CodeGenModule &CGM; // Per-module state.
287 const TargetInfo &Target;
288
289 // For EH/SEH outlined funclets, this field points to parent's CGF
290 CodeGenFunction *ParentCGF = nullptr;
291
292 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
293 LoopInfoStack LoopStack;
294 CGBuilderTy Builder;
295
296 // Stores variables for which we can't generate correct lifetime markers
297 // because of jumps.
298 VarBypassDetector Bypasses;
299
300 /// List of recently emitted OMPCanonicalLoops.
301 ///
302 /// Since OMPCanonicalLoops are nested inside other statements (in particular
303 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
304 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
305 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
306 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
307 /// this stack when done. Entering a new loop requires clearing this list; it
308 /// either means we start parsing a new loop nest (in which case the previous
309 /// loop nest goes out of scope) or a second loop in the same level in which
310 /// case it would be ambiguous into which of the two (or more) loops the loop
311 /// nest would extend.
312 SmallVector<llvm::CanonicalLoopInfo *, 4> OMPLoopNestStack;
313
314 /// Stack to track the Logical Operator recursion nest for MC/DC.
316
317 /// Stack to track the controlled convergence tokens.
319
320 /// Number of nested loops to be consumed by the last surrounding
321 /// loop-associated directive.
323
324 // CodeGen lambda for loops and support for ordered clause
325 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
326 JumpDest)>
327 CodeGenLoopTy;
328 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
329 const unsigned, const bool)>
330 CodeGenOrderedTy;
331
332 // Codegen lambda for loop bounds in worksharing loop constructs
333 typedef llvm::function_ref<std::pair<LValue, LValue>(
334 CodeGenFunction &, const OMPExecutableDirective &S)>
335 CodeGenLoopBoundsTy;
336
337 // Codegen lambda for loop bounds in dispatch-based loop implementation
338 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
339 CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
340 Address UB)>
341 CodeGenDispatchBoundsTy;
342
343 /// CGBuilder insert helper. This function is called after an
344 /// instruction is created using Builder.
345 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
346 llvm::BasicBlock::iterator InsertPt) const;
347
348 /// CurFuncDecl - Holds the Decl for the current outermost
349 /// non-closure context.
350 const Decl *CurFuncDecl = nullptr;
351 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
352 const Decl *CurCodeDecl = nullptr;
353 const CGFunctionInfo *CurFnInfo = nullptr;
354 QualType FnRetTy;
355 llvm::Function *CurFn = nullptr;
356
357 /// Save Parameter Decl for coroutine.
359
360 // Holds coroutine data if the current function is a coroutine. We use a
361 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
362 // in this header.
363 struct CGCoroInfo {
364 std::unique_ptr<CGCoroData> Data;
365 bool InSuspendBlock = false;
366 CGCoroInfo();
367 ~CGCoroInfo();
368 };
369 CGCoroInfo CurCoro;
370
371 bool isCoroutine() const {
372 return CurCoro.Data != nullptr;
373 }
374
375 bool inSuspendBlock() const {
376 return isCoroutine() && CurCoro.InSuspendBlock;
377 }
378
379 // Holds FramePtr for await_suspend wrapper generation,
380 // so that the __builtin_coro_frame call can be lowered
381 // directly to the value of its second argument
382 struct AwaitSuspendWrapperInfo {
383 llvm::Value *FramePtr = nullptr;
384 };
385 AwaitSuspendWrapperInfo CurAwaitSuspendWrapper;
386
387 // Generates a wrapper function for the `llvm.coro.await.suspend.*` intrinsics.
388 // It encapsulates the SuspendExpr in a function, to separate its body
389 // from the main coroutine and avoid miscompilations. The intrinsic
390 // is lowered to a call to this function in the CoroSplit pass.
391 // The function signature is:
392 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
393 // where type is one of (void, i1, ptr)
394 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
395 Twine const &SuspendPointName,
396 CoroutineSuspendExpr const &S);
397
398 /// CurGD - The GlobalDecl for the current function being compiled.
399 GlobalDecl CurGD;
400
401 /// PrologueCleanupDepth - The cleanup depth enclosing all the
402 /// cleanups associated with the parameters.
403 EHScopeStack::stable_iterator PrologueCleanupDepth;
404
405 /// ReturnBlock - Unified return block.
406 JumpDest ReturnBlock;
407
408 /// ReturnValue - The temporary alloca to hold the return
409 /// value. This is invalid iff the function has no return value.
410 Address ReturnValue = Address::invalid();
411
412 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
413 /// This is invalid if sret is not in use.
414 Address ReturnValuePointer = Address::invalid();
415
416 /// If a return statement is being visited, this holds the return statement's
417 /// result expression.
418 const Expr *RetExpr = nullptr;
419
420 /// Return true if a label was seen in the current scope.
421 bool hasLabelBeenSeenInCurrentScope() const {
422 if (CurLexicalScope)
423 return CurLexicalScope->hasLabels();
424 return !LabelMap.empty();
425 }
426
427 /// AllocaInsertPoint - This is an instruction in the entry block before which
428 /// we prefer to insert allocas.
429 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
430
431private:
432 /// PostAllocaInsertPt - This is a place in the prologue where code can be
433 /// inserted that will be dominated by all the static allocas. This helps
434 /// achieve two things:
435 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
436 /// 2. All other prologue code (which are dominated by static allocas) do
437 /// appear in the source order immediately after all static allocas.
438 ///
439 /// PostAllocaInsertPt will be lazily created when it is *really* required.
440 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
441
442public:
443 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
444 /// immediately after AllocaInsertPt.
445 llvm::Instruction *getPostAllocaInsertPoint() {
446 if (!PostAllocaInsertPt) {
447 assert(AllocaInsertPt &&
448 "Expected static alloca insertion point at function prologue");
449 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
450 "EBB should be entry block of the current code gen function");
451 PostAllocaInsertPt = AllocaInsertPt->clone();
452 PostAllocaInsertPt->setName("postallocapt");
453 PostAllocaInsertPt->insertAfter(AllocaInsertPt);
454 }
455
456 return PostAllocaInsertPt;
457 }
458
459 /// API for captured statement code generation.
460 class CGCapturedStmtInfo {
461 public:
463 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
466 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
467
468 RecordDecl::field_iterator Field =
469 S.getCapturedRecordDecl()->field_begin();
470 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
471 E = S.capture_end();
472 I != E; ++I, ++Field) {
473 if (I->capturesThis())
474 CXXThisFieldDecl = *Field;
475 else if (I->capturesVariable())
476 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
477 else if (I->capturesVariableByCopy())
478 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
479 }
480 }
481
482 virtual ~CGCapturedStmtInfo();
483
484 CapturedRegionKind getKind() const { return Kind; }
485
486 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
487 // Retrieve the value of the context parameter.
488 virtual llvm::Value *getContextValue() const { return ThisValue; }
489
490 /// Lookup the captured field decl for a variable.
491 virtual const FieldDecl *lookup(const VarDecl *VD) const {
492 return CaptureFields.lookup(VD->getCanonicalDecl());
493 }
494
495 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
496 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
497
498 static bool classof(const CGCapturedStmtInfo *) {
499 return true;
500 }
501
502 /// Emit the captured statement body.
503 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
505 CGF.EmitStmt(S);
506 }
507
508 /// Get the name of the capture helper.
509 virtual StringRef getHelperName() const { return "__captured_stmt"; }
510
511 /// Get the CaptureFields
512 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
513 return CaptureFields;
514 }
515
516 private:
517 /// The kind of captured statement being generated.
518 CapturedRegionKind Kind;
519
520 /// Keep the map between VarDecl and FieldDecl.
521 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
522
523 /// The base address of the captured record, passed in as the first
524 /// argument of the parallel region function.
525 llvm::Value *ThisValue;
526
527 /// Captured 'this' type.
528 FieldDecl *CXXThisFieldDecl;
529 };
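// Sketch (assumption): a captured statement body is usually emitted with a
// CGCapturedStmtInfo (or subclass) installed on the CodeGenFunction, typically
// through the CGCapturedStmtRAII helper declared below; `CGF` and `CS` are
// hypothetical locals here.
//
//   CodeGenFunction::CGCapturedStmtInfo CapInfo(CS); // CS: const CapturedStmt &
//   CodeGenFunction::CGCapturedStmtRAII Raii(CGF, &CapInfo);
//   CapInfo.EmitBody(CGF, CS.getCapturedStmt());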
530 CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
531
532 /// RAII for correct setting/restoring of CapturedStmtInfo.
533 class CGCapturedStmtRAII {
534 private:
535 CodeGenFunction &CGF;
536 CGCapturedStmtInfo *PrevCapturedStmtInfo;
537 public:
538 CGCapturedStmtRAII(CodeGenFunction &CGF,
539 CGCapturedStmtInfo *NewCapturedStmtInfo)
540 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
541 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
542 }
543 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
544 };
545
546 /// An abstract representation of regular/ObjC call/message targets.
547 class AbstractCallee {
548 /// The function declaration of the callee.
549 const Decl *CalleeDecl;
550
551 public:
552 AbstractCallee() : CalleeDecl(nullptr) {}
553 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
554 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
555 bool hasFunctionDecl() const {
556 return isa_and_nonnull<FunctionDecl>(CalleeDecl);
557 }
558 const Decl *getDecl() const { return CalleeDecl; }
559 unsigned getNumParams() const {
560 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
561 return FD->getNumParams();
562 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
563 }
564 const ParmVarDecl *getParamDecl(unsigned I) const {
565 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
566 return FD->getParamDecl(I);
567 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
568 }
569 };
570
571 /// Sanitizers enabled for this function.
572 SanitizerSet SanOpts;
573
574 /// True if CodeGen currently emits code implementing sanitizer checks.
575 bool IsSanitizerScope = false;
576
577 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
578 class SanitizerScope {
579 CodeGenFunction *CGF;
580 public:
581 SanitizerScope(CodeGenFunction *CGF);
582 ~SanitizerScope();
583 };
584
585 /// In C++, whether we are code generating a thunk. This controls whether we
586 /// should emit cleanups.
587 bool CurFuncIsThunk = false;
588
589 /// In ARC, whether we should autorelease the return value.
590 bool AutoreleaseResult = false;
591
592 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
593 /// potentially set the return value.
594 bool SawAsmBlock = false;
595
597
598 /// True if the current function is an outlined SEH helper. This can be a
599 /// finally block or filter expression.
600 bool IsOutlinedSEHHelper = false;
601
602 /// True if CodeGen currently emits code inside preserved access index
603 /// region.
604 bool IsInPreservedAIRegion = false;
605
606 /// True if the current statement has nomerge attribute.
607 bool InNoMergeAttributedStmt = false;
608
609 /// True if the current statement has noinline attribute.
610 bool InNoInlineAttributedStmt = false;
611
612 /// True if the current statement has always_inline attribute.
613 bool InAlwaysInlineAttributedStmt = false;
614
615 /// True if the current statement has noconvergent attribute.
616 bool InNoConvergentAttributedStmt = false;
617
618 // The CallExpr within the current statement that the musttail attribute
619 // applies to. nullptr if there is no 'musttail' on the current statement.
620 const CallExpr *MustTailCall = nullptr;
621
622 /// Returns true if a function must make progress, which means the
623 /// mustprogress attribute can be added.
624 bool checkIfFunctionMustProgress() {
625 if (CGM.getCodeGenOpts().getFiniteLoops() ==
626 CodeGenOptions::FiniteLoopsKind::Never)
627 return false;
628
629 // C++11 and later guarantees that a thread eventually will do one of the
630 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
631 // - terminate,
632 // - make a call to a library I/O function,
633 // - perform an access through a volatile glvalue, or
634 // - perform a synchronization operation or an atomic operation.
635 //
636 // Hence each function is 'mustprogress' in C++11 or later.
637 return getLangOpts().CPlusPlus11;
638 }
639
640 /// Returns true if a loop must make progress, which means the mustprogress
641 /// attribute can be added. \p HasEmptyBody indicates whether the loop body
642 /// is known to be empty.
643 bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
644
646 llvm::Value *BlockPointer = nullptr;
647
648 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
650
651 /// A mapping from NRVO variables to the flags used to indicate
652 /// when the NRVO has been applied to this variable.
653 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
654
657
658 // A stack of cleanups which were added to EHStack but have to be deactivated
659 // later before being popped or emitted. These are usually deactivated on
660 // exiting a `CleanupDeactivationScope` scope. For instance, after a
661 // full-expr.
662 //
663 // These are especially useful for correctly emitting cleanups while
664 // encountering branches out of an expression (through stmt-exprs or coroutine
665 // suspensions).
668 llvm::Instruction *DominatingIP;
669 };
671
672 // Enters a new scope for capturing cleanups which are deferred to be
673 // deactivated, all of which will be deactivated once the scope is exited.
682
684 assert(!Deactivated && "Deactivating already deactivated scope");
686 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
687 CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
688 Stack[I - 1].DominatingIP);
689 Stack[I - 1].DominatingIP->eraseFromParent();
690 }
691 Stack.resize(OldDeactivateCleanupStackSize);
692 Deactivated = true;
693 }
694
696 if (Deactivated)
697 return;
699 }
700 };
701
703
704 llvm::Instruction *CurrentFuncletPad = nullptr;
705
707 bool isRedundantBeforeReturn() override { return true; }
708
709 llvm::Value *Addr;
710 llvm::Value *Size;
711
712 public:
713 CallLifetimeEnd(RawAddress addr, llvm::Value *size)
714 : Addr(addr.getPointer()), Size(size) {}
715
716 void Emit(CodeGenFunction &CGF, Flags flags) override {
717 CGF.EmitLifetimeEnd(Size, Addr);
718 }
719 };
720
721 /// Header for data within LifetimeExtendedCleanupStack.
723 /// The size of the following cleanup object.
724 unsigned Size;
725 /// The kind of cleanup to push.
726 LLVM_PREFERRED_TYPE(CleanupKind)
728 /// Whether this is a conditional cleanup.
729 LLVM_PREFERRED_TYPE(bool)
730 unsigned IsConditional : 1;
731
732 size_t getSize() const { return Size; }
733 CleanupKind getKind() const { return (CleanupKind)Kind; }
734 bool isConditional() const { return IsConditional; }
735 };
736
737 /// i32s containing the indexes of the cleanup destinations.
739
741
742 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
743 llvm::BasicBlock *EHResumeBlock = nullptr;
744
745 /// The exception slot. All landing pads write the current exception pointer
746 /// into this alloca.
747 llvm::Value *ExceptionSlot = nullptr;
748
749 /// The selector slot. Under the MandatoryCleanup model, all landing pads
750 /// write the current selector value into this alloca.
751 llvm::AllocaInst *EHSelectorSlot = nullptr;
752
753 /// A stack of exception code slots. Entering an __except block pushes a slot
754 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
755 /// a value from the top of the stack.
757
758 /// Value returned by __exception_info intrinsic.
759 llvm::Value *SEHInfo = nullptr;
760
761 /// Emits a landing pad for the current EH stack.
762 llvm::BasicBlock *EmitLandingPad();
763
764 llvm::BasicBlock *getInvokeDestImpl();
765
766 /// Parent loop-based directive for scan directive.
768 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
769 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
770 llvm::BasicBlock *OMPScanExitBlock = nullptr;
771 llvm::BasicBlock *OMPScanDispatch = nullptr;
772 bool OMPFirstScanLoop = false;
773
774 /// Manages parent directive for scan directives.
776 CodeGenFunction &CGF;
777 const OMPExecutableDirective *ParentLoopDirectiveForScan;
778
779 public:
781 CodeGenFunction &CGF,
782 const OMPExecutableDirective &ParentLoopDirectiveForScan)
783 : CGF(CGF),
784 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
785 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
786 }
788 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
789 }
790 };
791
792 template <class T>
794 return DominatingValue<T>::save(*this, value);
795 }
796
798 public:
799 CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
802
803 private:
804 void ConstructorHelper(FPOptions FPFeatures);
805 CodeGenFunction &CGF;
806 FPOptions OldFPFeatures;
807 llvm::fp::ExceptionBehavior OldExcept;
808 llvm::RoundingMode OldRounding;
809 std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
810 };
812
813public:
814 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
815 /// rethrows.
817
818 /// A class controlling the emission of a finally block.
820 /// Where the catchall's edge through the cleanup should go.
821 JumpDest RethrowDest;
822
823 /// A function to call to enter the catch.
824 llvm::FunctionCallee BeginCatchFn;
825
826 /// An i1 variable indicating whether or not the @finally is
827 /// running for an exception.
828 llvm::AllocaInst *ForEHVar = nullptr;
829
830 /// An i8* variable into which the exception pointer to rethrow
831 /// has been saved.
832 llvm::AllocaInst *SavedExnVar = nullptr;
833
834 public:
835 void enter(CodeGenFunction &CGF, const Stmt *Finally,
836 llvm::FunctionCallee beginCatchFn,
837 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
838 void exit(CodeGenFunction &CGF);
839 };
840
841 /// Returns true inside SEH __try blocks.
842 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
843
844 /// Returns true while emitting a cleanuppad.
845 bool isCleanupPadScope() const {
846 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
847 }
848
849 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
850 /// current full-expression. Safe against the possibility that
851 /// we're currently inside a conditionally-evaluated expression.
852 template <class T, class... As>
853 void pushFullExprCleanup(CleanupKind kind, As... A) {
854 // If we're not in a conditional branch, or if none of the
855 // arguments requires saving, then use the unconditional cleanup.
857 return EHStack.pushCleanup<T>(kind, A...);
858
859 // Stash values in a tuple so we can guarantee the order of saves.
860 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
861 SavedTuple Saved{saveValueInCond(A)...};
862
863 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
864 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
866 }
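// Sketch (assumption; `DestroyTemporary` is a hypothetical cleanup class with
// a matching constructor): callers pass the cleanup kind plus the arguments
// the cleanup will need, and pushFullExprCleanup decides whether those
// arguments must be saved for a conditionally evaluated context:
//
//   CGF.pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup,
//                                             TempAddr, TempType);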
867
868 /// Queue a cleanup to be pushed after finishing the current full-expression,
869 /// potentially with an active flag.
870 template <class T, class... As>
873 return pushCleanupAfterFullExprWithActiveFlag<T>(
874 Kind, RawAddress::invalid(), A...);
875
876 RawAddress ActiveFlag = createCleanupActiveFlag();
877 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
878 "cleanup active flag should never need saving");
879
880 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
881 SavedTuple Saved{saveValueInCond(A)...};
882
883 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
884 pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag, Saved);
885 }
886
887 template <class T, class... As>
889 RawAddress ActiveFlag, As... A) {
890 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
891 ActiveFlag.isValid()};
892
895 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
896 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
897
898 static_assert(sizeof(Header) % alignof(T) == 0,
899 "Cleanup will be allocated on misaligned address");
900 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
901 new (Buffer) LifetimeExtendedCleanupHeader(Header);
902 new (Buffer + sizeof(Header)) T(A...);
903 if (Header.IsConditional)
904 new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
905 }
906
907 // Push a cleanup onto EHStack and deactivate it later. It is usually
908 // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
909 // full expression).
910 template <class T, class... As>
912 // Placeholder dominating IP for this cleanup.
913 llvm::Instruction *DominatingIP =
914 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
915 EHStack.pushCleanup<T>(Kind, A...);
917 {EHStack.stable_begin(), DominatingIP});
918 }
919
920 /// Set up the last cleanup that was pushed as a conditional
921 /// full-expression cleanup.
924 }
925
928
929 /// PushDestructorCleanup - Push a cleanup to call the
930 /// complete-object destructor of an object of the given type at the
931 /// given address. Does nothing if T is not a C++ class type with a
932 /// non-trivial destructor.
934
935 /// PushDestructorCleanup - Push a cleanup to call the
936 /// complete-object variant of the given destructor on the object at
937 /// the given address.
939 Address Addr);
940
941 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
942 /// process all branch fixups.
943 void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
944 bool ForDeactivation = false);
945
946 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
947 /// The block cannot be reactivated. Pops it if it's the top of the
948 /// stack.
949 ///
950 /// \param DominatingIP - An instruction which is known to
951 /// dominate the current IP (if set) and which lies along
952 /// all paths of execution between the current IP and the
953 /// point at which the cleanup comes into scope.
955 llvm::Instruction *DominatingIP);
956
957 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
958 /// Cannot be used to resurrect a deactivated cleanup.
959 ///
960 /// \param DominatingIP - An instruction which is known to
961 /// dominate the current IP (if set) and which lies along
962 /// all paths of execution between the current IP and the
963 /// point at which the cleanup comes into scope.
965 llvm::Instruction *DominatingIP);
966
967 /// Enters a new scope for capturing cleanups, all of which
968 /// will be executed once the scope is exited.
970 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
971 size_t LifetimeExtendedCleanupStackSize;
972 CleanupDeactivationScope DeactivateCleanups;
973 bool OldDidCallStackSave;
974 protected:
976 private:
977
978 RunCleanupsScope(const RunCleanupsScope &) = delete;
979 void operator=(const RunCleanupsScope &) = delete;
980
981 protected:
983
984 public:
985 /// Enter a new cleanup scope.
987 : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
988 CleanupStackDepth = CGF.EHStack.stable_begin();
989 LifetimeExtendedCleanupStackSize =
991 OldDidCallStackSave = CGF.DidCallStackSave;
992 CGF.DidCallStackSave = false;
993 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
994 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
995 }
996
997 /// Exit this cleanup scope, emitting any accumulated cleanups.
999 if (PerformCleanup)
1000 ForceCleanup();
1001 }
1002
1003 /// Determine whether this scope requires any cleanups.
1004 bool requiresCleanups() const {
1005 return CGF.EHStack.stable_begin() != CleanupStackDepth;
1006 }
1007
1008 /// Force the emission of cleanups now, instead of waiting
1009 /// until this object is destroyed.
1010 /// \param ValuesToReload - A list of values that need to be available at
1011 /// the insertion point after cleanup emission. If cleanup emission created
1012 /// a shared cleanup block, these value pointers will be rewritten.
1013 /// Otherwise, they will not be modified.
1014 void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
1015 assert(PerformCleanup && "Already forced cleanup");
1016 CGF.DidCallStackSave = OldDidCallStackSave;
1017 DeactivateCleanups.ForceDeactivate();
1018 CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
1019 ValuesToReload);
1020 PerformCleanup = false;
1021 CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
1022 }
1023 };
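// Sketch (assumption): typical use of RunCleanupsScope when emitting a region
// whose local cleanups must run at a known point:
//
//   {
//     CodeGenFunction::RunCleanupsScope Scope(CGF);
//     // ...emit code that may push cleanups onto CGF.EHStack...
//   } // cleanups pushed inside the scope are emitted here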
1024
1025 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1028
1030 SourceRange Range;
1032 LexicalScope *ParentScope;
1033
1034 LexicalScope(const LexicalScope &) = delete;
1035 void operator=(const LexicalScope &) = delete;
1036
1037 public:
1038 /// Enter a new cleanup scope.
1040 : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
1041 CGF.CurLexicalScope = this;
1042 if (CGDebugInfo *DI = CGF.getDebugInfo())
1043 DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
1044 }
1045
1046 void addLabel(const LabelDecl *label) {
1047 assert(PerformCleanup && "adding label to dead scope?");
1048 Labels.push_back(label);
1049 }
1050
1051 /// Exit this cleanup scope, emitting any accumulated
1052 /// cleanups.
1054 if (CGDebugInfo *DI = CGF.getDebugInfo())
1055 DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
1056
1057 // If we should perform a cleanup, force them now. Note that
1058 // this ends the cleanup scope before rescoping any labels.
1059 if (PerformCleanup) {
1060 ApplyDebugLocation DL(CGF, Range.getEnd());
1061 ForceCleanup();
1062 }
1063 }
1064
1065 /// Force the emission of cleanups now, instead of waiting
1066 /// until this object is destroyed.
1068 CGF.CurLexicalScope = ParentScope;
1070
1071 if (!Labels.empty())
1072 rescopeLabels();
1073 }
1074
1075 bool hasLabels() const {
1076 return !Labels.empty();
1077 }
1078
1079 void rescopeLabels();
1080 };
1081
1082 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
1083
1084 /// The class used to assign some variables some temporarily addresses.
1086 DeclMapTy SavedLocals;
1087 DeclMapTy SavedTempAddresses;
1088 OMPMapVars(const OMPMapVars &) = delete;
1089 void operator=(const OMPMapVars &) = delete;
1090
1091 public:
1092 explicit OMPMapVars() = default;
1094 assert(SavedLocals.empty() && "Did not restore original addresses.");
1095 };
1096
1097 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1098 /// function \p CGF.
1099 /// \return true if at least one variable was set already, false otherwise.
1100 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
1101 Address TempAddr) {
1102 LocalVD = LocalVD->getCanonicalDecl();
1103 // Only save it once.
1104 if (SavedLocals.count(LocalVD)) return false;
1105
1106 // Copy the existing local entry to SavedLocals.
1107 auto it = CGF.LocalDeclMap.find(LocalVD);
1108 if (it != CGF.LocalDeclMap.end())
1109 SavedLocals.try_emplace(LocalVD, it->second);
1110 else
1111 SavedLocals.try_emplace(LocalVD, Address::invalid());
1112
1113 // Generate the private entry.
1114 QualType VarTy = LocalVD->getType();
1115 if (VarTy->isReferenceType()) {
1116 Address Temp = CGF.CreateMemTemp(VarTy);
1117 CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
1118 TempAddr = Temp;
1119 }
1120 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
1121
1122 return true;
1123 }
1124
1125 /// Applies new addresses to the list of the variables.
1126 /// \return true if at least one variable is using new address, false
1127 /// otherwise.
1129 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
1130 SavedTempAddresses.clear();
1131 return !SavedLocals.empty();
1132 }
1133
1134 /// Restores original addresses of the variables.
1136 if (!SavedLocals.empty()) {
1137 copyInto(SavedLocals, CGF.LocalDeclMap);
1138 SavedLocals.clear();
1139 }
1140 }
1141
1142 private:
1143 /// Copy all the entries in the source map over the corresponding
1144 /// entries in the destination, which must exist.
1145 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
1146 for (auto &Pair : Src) {
1147 if (!Pair.second.isValid()) {
1148 Dest.erase(Pair.first);
1149 continue;
1150 }
1151
1152 auto I = Dest.find(Pair.first);
1153 if (I != Dest.end())
1154 I->second = Pair.second;
1155 else
1156 Dest.insert(Pair);
1157 }
1158 }
1159 };
1160
1161 /// The scope used to remap some variables as private in the OpenMP loop body
1162 /// (or other captured region emitted without outlining), and to restore old
1163 /// vars back on exit.
1165 OMPMapVars MappedVars;
1166 OMPPrivateScope(const OMPPrivateScope &) = delete;
1167 void operator=(const OMPPrivateScope &) = delete;
1168
1169 public:
1170 /// Enter a new OpenMP private scope.
1172
1173 /// Registers \p LocalVD variable as a private with \p Addr as the address
1174 /// of the corresponding private variable. \p
1175 /// PrivateGen is the address of the generated private variable.
1176 /// \return true if the variable is registered as private, false if it has
1177 /// been privatized already.
1178 bool addPrivate(const VarDecl *LocalVD, Address Addr) {
1179 assert(PerformCleanup && "adding private to dead scope");
1180 return MappedVars.setVarAddr(CGF, LocalVD, Addr);
1181 }
1182
1183 /// Privatizes local variables previously registered as private.
1184 /// Registration is separate from the actual privatization to allow
1185 /// initializers to use values of the original variables, not the private one.
1186 /// This is important, for example, if the private variable is a class
1187 /// variable initialized by a constructor that references other private
1188 /// variables. But at initialization original variables must be used, not
1189 /// private copies.
1190 /// \return true if at least one variable was privatized, false otherwise.
1191 bool Privatize() { return MappedVars.apply(CGF); }
1192
1195 restoreMap();
1196 }
1197
1198 /// Exit scope - all the mapped variables are restored.
1200 if (PerformCleanup)
1201 ForceCleanup();
1202 }
1203
1204 /// Checks if the global variable is captured in the current function.
1205 bool isGlobalVarCaptured(const VarDecl *VD) const {
1206 VD = VD->getCanonicalDecl();
1207 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
1208 }
1209
1210 /// Restore all mapped variables without cleanup. This is useful when we want
1211 /// to reference the original variables but don't want the cleanup because
1212 /// that could emit lifetime end too early, causing backend issue #56913.
1213 void restoreMap() { MappedVars.restore(CGF); }
1214 };
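// Sketch (assumption): the usual OMPPrivateScope pattern registers private
// addresses first and applies them afterwards, so that initializers still see
// the original variables; `VD` and `PrivateAddr` are hypothetical.
//
//   CodeGenFunction::OMPPrivateScope PrivScope(CGF);
//   PrivScope.addPrivate(VD, PrivateAddr); // register only, not yet visible
//   (void)PrivScope.Privatize();           // remap VD to PrivateAddr
//   // ...emit the region body...
//   PrivScope.restoreMap();                // or rely on scope cleanup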
1215
1216 /// Save/restore original map of previously emitted local vars in case when we
1217 /// need to duplicate emission of the same code several times in the same
1218 /// function for OpenMP code.
1220 CodeGenFunction &CGF;
1221 DeclMapTy SavedMap;
1222
1223 public:
1225 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1226 ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
1227 };
1228
1229 /// Takes the old cleanup stack size and emits the cleanup blocks
1230 /// that have been added.
1231 void
1233 std::initializer_list<llvm::Value **> ValuesToReload = {});
1234
1235 /// Takes the old cleanup stack size and emits the cleanup blocks
1236 /// that have been added, then adds all lifetime-extended cleanups from
1237 /// the given position to the stack.
1238 void
1240 size_t OldLifetimeExtendedStackSize,
1241 std::initializer_list<llvm::Value **> ValuesToReload = {});
1242
1243 void ResolveBranchFixups(llvm::BasicBlock *Target);
1244
1245 /// The given basic block lies in the current EH scope, but may be a
1246 /// target of a potentially scope-crossing jump; get a stable handle
1247 /// to which we can perform this jump later.
1249 return JumpDest(Target,
1252 }
1253
1254 /// The given basic block lies in the current EH scope, but may be a
1255 /// target of a potentially scope-crossing jump; get a stable handle
1256 /// to which we can perform this jump later.
1257 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1259 }
1260
1261 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1262 /// block through the normal cleanup handling code (if any) and then
1263 /// on to \arg Dest.
1265
1266 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1267 /// specified destination obviously has no cleanups to run. 'false' is always
1268 /// a conservatively correct answer for this method.
1270
1271 /// popCatchScope - Pops the catch scope at the top of the EHScope
1272 /// stack, emitting any required code (other than the catch handlers
1273 /// themselves).
1275
1276 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
1278 llvm::BasicBlock *
1280
1281 /// An object to manage conditionally-evaluated expressions.
1283 llvm::BasicBlock *StartBB;
1284
1285 public:
1287 : StartBB(CGF.Builder.GetInsertBlock()) {}
1288
1289 void begin(CodeGenFunction &CGF) {
1290 assert(CGF.OutermostConditional != this);
1291 if (!CGF.OutermostConditional)
1292 CGF.OutermostConditional = this;
1293 }
1294
1295 void end(CodeGenFunction &CGF) {
1296 assert(CGF.OutermostConditional != nullptr);
1297 if (CGF.OutermostConditional == this)
1298 CGF.OutermostConditional = nullptr;
1299 }
1300
1301 /// Returns a block which will be executed prior to each
1302 /// evaluation of the conditional code.
1303 llvm::BasicBlock *getStartingBlock() const {
1304 return StartBB;
1305 }
1306 };
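// Sketch (assumption): each conditionally executed arm of an expression is
// bracketed with begin()/end() so that cleanups pushed while emitting it are
// treated as conditional:
//
//   CodeGenFunction::ConditionalEvaluation Eval(CGF);
//   Eval.begin(CGF);
//   // ...emit one branch of the conditional expression...
//   Eval.end(CGF);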
1307
1308 /// isInConditionalBranch - Return true if we're currently emitting
1309 /// one branch or the other of a conditional expression.
1310 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1311
1312 void setBeforeOutermostConditional(llvm::Value *value, Address addr,
1313 CodeGenFunction &CGF) {
1314 assert(isInConditionalBranch());
1315 llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1316 auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
1317 block->back().getIterator());
1318 store->setAlignment(addr.getAlignment().getAsAlign());
1319 }
1320
1321 /// An RAII object to record that we're evaluating a statement
1322 /// expression.
1324 CodeGenFunction &CGF;
1325
1326 /// We have to save the outermost conditional: cleanups in a
1327 /// statement expression aren't conditional just because the
1328 /// StmtExpr is.
1329 ConditionalEvaluation *SavedOutermostConditional;
1330
1331 public:
1333 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1334 CGF.OutermostConditional = nullptr;
1335 }
1336
1338 CGF.OutermostConditional = SavedOutermostConditional;
1339 CGF.EnsureInsertPoint();
1340 }
1341 };
1342
1343 /// An object which temporarily prevents a value from being
1344 /// destroyed by aggressive peephole optimizations that assume that
1345 /// all uses of a value have been realized in the IR.
1347 llvm::Instruction *Inst = nullptr;
1348 friend class CodeGenFunction;
1349
1350 public:
1352 };
1353
1354 /// A non-RAII class containing all the information about a bound
1355 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1356 /// this which makes individual mappings very simple; using this
1357 /// class directly is useful when you have a variable number of
1358 /// opaque values or don't want the RAII functionality for some
1359 /// reason.
1361 const OpaqueValueExpr *OpaqueValue;
1362 bool BoundLValue;
1364
1366 bool boundLValue)
1367 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1368 public:
1369 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1370
1371 static bool shouldBindAsLValue(const Expr *expr) {
1372 // gl-values should be bound as l-values for obvious reasons.
1373 // Records should be bound as l-values because IR generation
1374 // always keeps them in memory. Expressions of function type
1375 // act exactly like l-values but are formally required to be
1376 // r-values in C.
1377 return expr->isGLValue() ||
1378 expr->getType()->isFunctionType() ||
1379 hasAggregateEvaluationKind(expr->getType());
1380 }
1381
1383 const OpaqueValueExpr *ov,
1384 const Expr *e) {
1385 if (shouldBindAsLValue(ov))
1386 return bind(CGF, ov, CGF.EmitLValue(e));
1387 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1388 }
1389
1391 const OpaqueValueExpr *ov,
1392 const LValue &lv) {
1393 assert(shouldBindAsLValue(ov));
1394 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1395 return OpaqueValueMappingData(ov, true);
1396 }
1397
1399 const OpaqueValueExpr *ov,
1400 const RValue &rv) {
1401 assert(!shouldBindAsLValue(ov));
1402 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1403
1404 OpaqueValueMappingData data(ov, false);
1405
1406 // Work around an extremely aggressive peephole optimization in
1407 // EmitScalarConversion which assumes that all other uses of a
1408 // value are extant.
1409 data.Protection = CGF.protectFromPeepholes(rv);
1410
1411 return data;
1412 }
1413
1414 bool isValid() const { return OpaqueValue != nullptr; }
1415 void clear() { OpaqueValue = nullptr; }
1416
1418 assert(OpaqueValue && "no data to unbind!");
1419
1420 if (BoundLValue) {
1421 CGF.OpaqueLValues.erase(OpaqueValue);
1422 } else {
1423 CGF.OpaqueRValues.erase(OpaqueValue);
1424 CGF.unprotectFromPeepholes(Protection);
1425 }
1426 }
1427 };
1428
1429 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1431 CodeGenFunction &CGF;
1433
1434 public:
1435 static bool shouldBindAsLValue(const Expr *expr) {
1437 }
1438
1439 /// Build the opaque value mapping for the given conditional
1440 /// operator if it's the GNU ?: extension. This is a common
1441 /// enough pattern that the convenience operator is really
1442 /// helpful.
1443 ///
1445 const AbstractConditionalOperator *op) : CGF(CGF) {
1446 if (isa<ConditionalOperator>(op))
1447 // Leave Data empty.
1448 return;
1449
1450 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1452 e->getCommon());
1453 }
1454
1455 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1456 /// expression is set to the expression the OVE represents.
1458 : CGF(CGF) {
1459 if (OV) {
1460 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1461 "for OVE with no source expression");
1463 }
1464 }
1465
1467 const OpaqueValueExpr *opaqueValue,
1468 LValue lvalue)
1469 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1470 }
1471
1473 const OpaqueValueExpr *opaqueValue,
1474 RValue rvalue)
1475 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1476 }
1477
1478 void pop() {
1479 Data.unbind(CGF);
1480 Data.clear();
1481 }
1482
1484 if (Data.isValid()) Data.unbind(CGF);
1485 }
1486 };
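// Sketch (assumption): for the GNU binary conditional operator `x ?: y`, the
// common subexpression is bound once so both the condition and the true
// result can refer to it through the OpaqueValueExpr:
//
//   // CondOp: the AbstractConditionalOperator being emitted.
//   CodeGenFunction::OpaqueValueMapping Binding(CGF, CondOp);
//   // ...emit the conditional; uses of the OpaqueValueExpr resolve to the
//   // bound value until Binding goes out of scope (or pop() is called).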
1487
1488private:
1489 CGDebugInfo *DebugInfo;
1490 /// Used to create unique names for artificial VLA size debug info variables.
1491 unsigned VLAExprCounter = 0;
1492 bool DisableDebugInfo = false;
1493
1494 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1495 /// calling llvm.stacksave for multiple VLAs in the same scope.
1496 bool DidCallStackSave = false;
1497
1498 /// IndirectBranch - The first time an indirect goto is seen we create a block
1499 /// with an indirect branch. Every time we see the address of a label taken,
1500 /// we add the label to the indirect goto. Every subsequent indirect goto is
1501 /// codegen'd as a jump to the IndirectBranch's basic block.
1502 llvm::IndirectBrInst *IndirectBranch = nullptr;
1503
1504 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1505 /// decls.
1506 DeclMapTy LocalDeclMap;
1507
1508 // Keep track of the cleanups for callee-destructed parameters pushed to the
1509 // cleanup stack so that they can be deactivated later.
1510 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1511 CalleeDestructedParamCleanups;
1512
1513 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1514 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1515 /// parameter.
1516 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1517 SizeArguments;
1518
1519 /// Track escaped local variables with auto storage. Used during SEH
1520 /// outlining to produce a call to llvm.localescape.
1521 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1522
1523 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1524 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1525
1526 // BreakContinueStack - This keeps track of where break and continue
1527 // statements should jump to.
1528 struct BreakContinue {
1529 BreakContinue(JumpDest Break, JumpDest Continue)
1530 : BreakBlock(Break), ContinueBlock(Continue) {}
1531
1532 JumpDest BreakBlock;
1533 JumpDest ContinueBlock;
1534 };
1535 SmallVector<BreakContinue, 8> BreakContinueStack;
1536
1537 /// Handles cancellation exit points in OpenMP-related constructs.
1538 class OpenMPCancelExitStack {
1539 /// Tracks cancellation exit point and join point for cancel-related exit
1540 /// and normal exit.
1541 struct CancelExit {
1542 CancelExit() = default;
1543 CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1544 JumpDest ContBlock)
1545 : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1546 OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
1547 /// true if the exit block has been emitted already by the special
1548 /// emitExit() call, false if the default codegen is used.
1549 bool HasBeenEmitted = false;
1550 JumpDest ExitBlock;
1551 JumpDest ContBlock;
1552 };
1553
1554 SmallVector<CancelExit, 8> Stack;
1555
1556 public:
1557 OpenMPCancelExitStack() : Stack(1) {}
1558 ~OpenMPCancelExitStack() = default;
1559 /// Fetches the exit block for the current OpenMP construct.
1560 JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1561 /// Emits exit block with special codegen procedure specific for the related
1562 /// OpenMP construct + emits code for normal construct cleanup.
1563 void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1564 const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1565 if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1566 assert(CGF.getOMPCancelDestination(Kind).isValid());
1567 assert(CGF.HaveInsertPoint());
1568 assert(!Stack.back().HasBeenEmitted);
1569 auto IP = CGF.Builder.saveAndClearIP();
1570 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1571 CodeGen(CGF);
1572 CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1573 CGF.Builder.restoreIP(IP);
1574 Stack.back().HasBeenEmitted = true;
1575 }
1576 CodeGen(CGF);
1577 }
1578 /// Enter the cancel supporting \a Kind construct.
1579 /// \param Kind OpenMP directive that supports cancel constructs.
1580 /// \param HasCancel true, if the construct has inner cancel directive,
1581 /// false otherwise.
1582 void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1583 Stack.push_back({Kind,
1584 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1585 : JumpDest(),
1586 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1587 : JumpDest()});
1588 }
1589 /// Emits default exit point for the cancel construct (if the special one
1590 /// has not been used) + join point for cancel/normal exits.
1591 void exit(CodeGenFunction &CGF) {
1592 if (getExitBlock().isValid()) {
1593 assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1594 bool HaveIP = CGF.HaveInsertPoint();
1595 if (!Stack.back().HasBeenEmitted) {
1596 if (HaveIP)
1597 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1598 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1599 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1600 }
1601 CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1602 if (!HaveIP) {
1603 CGF.Builder.CreateUnreachable();
1604 CGF.Builder.ClearInsertionPoint();
1605 }
1606 }
1607 Stack.pop_back();
1608 }
1609 };
1610 OpenMPCancelExitStack OMPCancelStack;
1611
1612 /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
1613 llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
1614 Stmt::Likelihood LH);
1615
1616 CodeGenPGO PGO;
1617
1618 /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
1619 Address MCDCCondBitmapAddr = Address::invalid();
1620
1621 /// Calculate branch weights appropriate for PGO data
1622 llvm::MDNode *createProfileWeights(uint64_t TrueCount,
1623 uint64_t FalseCount) const;
1624 llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
1625 llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1626 uint64_t LoopCount) const;
1627
1628public:
1629 /// Increment the profiler's counter for the given statement by \p StepV.
1630 /// If \p StepV is null, the default increment is 1.
1631 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1632 if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
1633 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
1634 !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile)) {
1635 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1636 PGO.emitCounterSetOrIncrement(Builder, S, StepV);
1637 }
1638 PGO.setCurrentStmt(S);
1639 }
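// Sketch (assumption): statement emitters typically bump the PGO counter for
// a region right after entering its block, e.g. for the "then" branch of an
// if statement:
//
//   CGF.EmitBlock(ThenBlock);
//   CGF.incrementProfileCounter(&S);   // S: the IfStmt being emitted
//   CGF.EmitStmt(S.getThen());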
1640
1641 bool isMCDCCoverageEnabled() {
1642 return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
1643 CGM.getCodeGenOpts().MCDCCoverage &&
1644 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
1645 }
1646
1647 /// Allocate a temp value on the stack that MCDC can use to track condition
1648 /// results.
1649 void maybeCreateMCDCCondBitmap() {
1650 if (isMCDCCoverageEnabled()) {
1651 PGO.emitMCDCParameters(Builder);
1652 MCDCCondBitmapAddr =
1653 CreateIRTemp(getContext().UnsignedIntTy, "mcdc.addr");
1654 }
1655 }
1656
1657 bool isBinaryLogicalOp(const Expr *E) const {
1658 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
1659 return (BOp && BOp->isLogicalOp());
1660 }
1661
1662 /// Zero-init the MCDC temp value.
1665 PGO.emitMCDCCondBitmapReset(Builder, E, MCDCCondBitmapAddr);
1666 PGO.setCurrentStmt(E);
1667 }
1668 }
1669
1670 /// Increment the profiler's counter for the given expression by \p StepV.
1671 /// If \p StepV is null, the default increment is 1.
1674 PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
1675 PGO.setCurrentStmt(E);
1676 }
1677 }
1678
1679 /// Update the MCDC temp value with the condition's evaluated result.
1680 void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
1681 if (isMCDCCoverageEnabled()) {
1682 PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
1683 PGO.setCurrentStmt(E);
1684 }
1685 }
1686
1687 /// Get the profiler's count for the given statement.
1688 uint64_t getProfileCount(const Stmt *S) {
1689 return PGO.getStmtCount(S).value_or(0);
1690 }
1691
1692 /// Set the profiler's current count.
1693 void setCurrentProfileCount(uint64_t Count) {
1694 PGO.setCurrentRegionCount(Count);
1695 }
1696
1697 /// Get the profiler's current count. This is generally the count for the most
1698 /// recently incremented counter.
1699 uint64_t getCurrentProfileCount() {
1700 return PGO.getCurrentRegionCount();
1701 }
1702
1703private:
1704
1705 /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
1706 /// if the current context is not in a switch.
1707 llvm::SwitchInst *SwitchInsn = nullptr;
1708 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1709 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1710
1711 /// The likelihood attributes of the SwitchCase.
1712 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1713
1714 /// CaseRangeBlock - This block holds if condition check for last case
1715 /// statement range in current switch instruction.
1716 llvm::BasicBlock *CaseRangeBlock = nullptr;
1717
1718 /// OpaqueLValues - Keeps track of the current set of opaque value
1719 /// expressions.
1720 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1721 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1722
1723 // VLASizeMap - This keeps track of the associated size for each VLA type.
1724 // We track this by the size expression rather than the type itself because
1725 // in certain situations, like a const qualifier applied to a VLA typedef,
1726 // multiple VLA types can share the same size expression.
1727 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1728 // enter/leave scopes.
1729 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1730
1731 /// A block containing a single 'unreachable' instruction. Created
1732 /// lazily by getUnreachableBlock().
1733 llvm::BasicBlock *UnreachableBlock = nullptr;
1734
1735 /// Count of the number of return expressions in the function.
1736 unsigned NumReturnExprs = 0;
1737
1738 /// Count the number of simple (constant) return expressions in the function.
1739 unsigned NumSimpleReturnExprs = 0;
1740
1741 /// The last regular (non-return) debug location (breakpoint) in the function.
1742 SourceLocation LastStopPoint;
1743
1744public:
1745 /// Source location information about the default argument or member
1746 /// initializer expression we're evaluating, if any.
1750
1751 /// A scope within which we are constructing the fields of an object which
1752 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1753 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1755 public:
1757 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1758 CGF.CXXDefaultInitExprThis = This;
1759 }
1761 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1762 }
1763
1764 private:
1765 CodeGenFunction &CGF;
1766 Address OldCXXDefaultInitExprThis;
1767 };
1768
1769 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1770 /// is overridden to be the object under construction.
1772 public:
1774 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1775 OldCXXThisAlignment(CGF.CXXThisAlignment),
1777 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
1778 CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1779 }
1781 CGF.CXXThisValue = OldCXXThisValue;
1782 CGF.CXXThisAlignment = OldCXXThisAlignment;
1783 }
1784
1785 public:
1787 llvm::Value *OldCXXThisValue;
1790 };
1791
1795 };
1796
1797 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1798 /// current loop index is overridden.
1800 public:
1801 ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1802 : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1803 CGF.ArrayInitIndex = Index;
1804 }
1806 CGF.ArrayInitIndex = OldArrayInitIndex;
1807 }
1808
1809 private:
1810 CodeGenFunction &CGF;
1811 llvm::Value *OldArrayInitIndex;
1812 };
1813
1815 public:
1817 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1818 OldCurCodeDecl(CGF.CurCodeDecl),
1819 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1820 OldCXXABIThisValue(CGF.CXXABIThisValue),
1821 OldCXXThisValue(CGF.CXXThisValue),
1822 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1823 OldCXXThisAlignment(CGF.CXXThisAlignment),
1824 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1825 OldCXXInheritedCtorInitExprArgs(
1826 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1827 CGF.CurGD = GD;
1828 CGF.CurFuncDecl = CGF.CurCodeDecl =
1829 cast<CXXConstructorDecl>(GD.getDecl());
1830 CGF.CXXABIThisDecl = nullptr;
1831 CGF.CXXABIThisValue = nullptr;
1832 CGF.CXXThisValue = nullptr;
1833 CGF.CXXABIThisAlignment = CharUnits();
1834 CGF.CXXThisAlignment = CharUnits();
1836 CGF.FnRetTy = QualType();
1837 CGF.CXXInheritedCtorInitExprArgs.clear();
1838 }
1840 CGF.CurGD = OldCurGD;
1841 CGF.CurFuncDecl = OldCurFuncDecl;
1842 CGF.CurCodeDecl = OldCurCodeDecl;
1843 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1844 CGF.CXXABIThisValue = OldCXXABIThisValue;
1845 CGF.CXXThisValue = OldCXXThisValue;
1846 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1847 CGF.CXXThisAlignment = OldCXXThisAlignment;
1848 CGF.ReturnValue = OldReturnValue;
1849 CGF.FnRetTy = OldFnRetTy;
1850 CGF.CXXInheritedCtorInitExprArgs =
1851 std::move(OldCXXInheritedCtorInitExprArgs);
1852 }
1853
1854 private:
1855 CodeGenFunction &CGF;
1856 GlobalDecl OldCurGD;
1857 const Decl *OldCurFuncDecl;
1858 const Decl *OldCurCodeDecl;
1859 ImplicitParamDecl *OldCXXABIThisDecl;
1860 llvm::Value *OldCXXABIThisValue;
1861 llvm::Value *OldCXXThisValue;
1862 CharUnits OldCXXABIThisAlignment;
1863 CharUnits OldCXXThisAlignment;
1864 Address OldReturnValue;
1865 QualType OldFnRetTy;
1866 CallArgList OldCXXInheritedCtorInitExprArgs;
1867 };
1868
1869 // Helper class for the OpenMP IR Builder. Allows reusability of code used
1870 // for region body and finalization codegen callbacks. This class will also
1871 // contain privatization functions used by the privatization callbacks.
1872 //
1873 // TODO: this is a temporary class for things that are being moved out of
1874 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1875 // utility functions for use with the OMPBuilder. Once the move to the
1876 // OMPBuilder is done, everything here will either become part of
1877 // CodeGenFunction directly, or move into a new helper class that contains
1878 // functions used by both this class and the OMPBuilder.
1879
1881
1885
1886 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1887
1888 /// Cleanup action for allocate support.
1890
1891 private:
1892 llvm::CallInst *RTLFnCI;
1893
1894 public:
1895 OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
1896 RLFnCI->removeFromParent();
1897 }
1898
1899 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
1900 if (!CGF.HaveInsertPoint())
1901 return;
1902 CGF.Builder.Insert(RTLFnCI);
1903 }
1904 };
1905
1906 /// Returns the address of the threadprivate variable for the current
1907 /// thread. This also creates any necessary OMP runtime calls.
1908 ///
1909 /// \param VD VarDecl for Threadprivate variable.
1910 /// \param VDAddr Address of the VarDecl.
1911 /// \param Loc The location where the threadprivate variable is referenced.
1913 const VarDecl *VD, Address VDAddr,
1915
1916 /// Gets the OpenMP-specific address of the local variable \p VD.
1918 const VarDecl *VD);
1919 /// Build a platform-specific name from \p Parts using the given separators.
1920 /// \param Parts Different parts of the final name that need separation.
1921 /// \param FirstSeparator First separator used between the initial two
1922 /// parts of the name.
1923 /// \param Separator Separator used between all of the remaining consecutive
1924 /// parts of the name.
1925 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1926 StringRef FirstSeparator = ".",
1927 StringRef Separator = ".");
1928 /// Emit the Finalization for an OMP region
1929 /// \param CGF The Codegen function this belongs to
1930 /// \param IP Insertion point for generating the finalization code.
1932 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1933 assert(IP.getBlock()->end() != IP.getPoint() &&
1934 "OpenMP IR Builder should cause terminated block!");
1935
1936 llvm::BasicBlock *IPBB = IP.getBlock();
1937 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1938 assert(DestBB && "Finalization block should have one successor!");
1939
1940 // erase and replace with cleanup branch.
1941 IPBB->getTerminator()->eraseFromParent();
1942 CGF.Builder.SetInsertPoint(IPBB);
1944 CGF.EmitBranchThroughCleanup(Dest);
1945 }
1946
1947 /// Emit the body of an OMP region
1948 /// \param CGF The Codegen function this belongs to
1949 /// \param RegionBodyStmt The body statement for the OpenMP region being
1950 /// generated
1951 /// \param AllocaIP Where to insert alloca instructions
1952 /// \param CodeGenIP Where to insert the region code
1953 /// \param RegionName Name to be used for new blocks
1955 const Stmt *RegionBodyStmt,
1956 InsertPointTy AllocaIP,
1957 InsertPointTy CodeGenIP,
1958 Twine RegionName);
1959
1960 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
1961 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
1963 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1964 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
1965 CodeGenIPBBTI->eraseFromParent();
1966
1967 CGF.Builder.SetInsertPoint(CodeGenIPBB);
1968
1969 if (Fn->doesNotThrow())
1970 CGF.EmitNounwindRuntimeCall(Fn, Args);
1971 else
1972 CGF.EmitRuntimeCall(Fn, Args);
1973
1974 if (CGF.Builder.saveIP().isSet())
1975 CGF.Builder.CreateBr(&FiniBB);
1976 }
1977
1978 /// Emit the body of an OMP region that will be outlined in
1979 /// OpenMPIRBuilder::finalize().
1980 /// \param CGF The Codegen function this belongs to
1981 /// \param RegionBodyStmt The body statement for the OpenMP region being
1982 /// generated
1983 /// \param AllocaIP Where to insert alloca instructions
1984 /// \param CodeGenIP Where to insert the region code
1985 /// \param RegionName Name to be used for new blocks
1987 const Stmt *RegionBodyStmt,
1988 InsertPointTy AllocaIP,
1989 InsertPointTy CodeGenIP,
1990 Twine RegionName);
1991
1992 /// RAII for preserving necessary info during Outlined region body codegen.
1994
1995 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
1996 CodeGenFunction::JumpDest OldReturnBlock;
1997 CodeGenFunction &CGF;
1998
1999 public:
2001 llvm::BasicBlock &RetBB)
2002 : CGF(cgf) {
2003 assert(AllocaIP.isSet() &&
2004 "Must specify Insertion point for allocas of outlined function");
2005 OldAllocaIP = CGF.AllocaInsertPt;
2006 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2007
2008 OldReturnBlock = CGF.ReturnBlock;
2009 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
2010 }
2011
2013 CGF.AllocaInsertPt = OldAllocaIP;
2014 CGF.ReturnBlock = OldReturnBlock;
2015 }
2016 };
2017
2018 /// RAII for preserving necessary info during inlined region body codegen.
2020
2021 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2022 CodeGenFunction &CGF;
2023
2024 public:
2026 llvm::BasicBlock &FiniBB)
2027 : CGF(cgf) {
2028 // The alloca insertion block should be in the entry block of the
2029 // containing function, so this expects either an unset AllocaIP (in which
2030 // case the old alloca insertion point is reused) or a new AllocaIP in the
2031 // same block as the old one.
2032 assert((!AllocaIP.isSet() ||
2033 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2034 "Insertion point should be in the entry block of containing "
2035 "function!");
2036 OldAllocaIP = CGF.AllocaInsertPt;
2037 if (AllocaIP.isSet())
2038 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2039
2040 // TODO: Remove the call, after making sure the counter is not used by
2041 // the EHStack.
2042 // Since this is an inlined region, it should not modify the
2043 // ReturnBlock, and should reuse the one for the enclosing outlined
2044 // region. So, the JumpDest returned by the function is discarded.
2045 (void)CGF.getJumpDestInCurrentScope(&FiniBB);
2046 }
2047
2049 };
2050 };
2051
2052private:
2053 /// CXXABIThisDecl - When generating code for a C++ member function,
2054 /// this will hold the implicit 'this' declaration.
2055 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2056 llvm::Value *CXXABIThisValue = nullptr;
2057 llvm::Value *CXXThisValue = nullptr;
2058 CharUnits CXXABIThisAlignment;
2059 CharUnits CXXThisAlignment;
2060
2061 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2062 /// this expression.
2063 Address CXXDefaultInitExprThis = Address::invalid();
2064
2065 /// The current array initialization index when evaluating an
2066 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2067 llvm::Value *ArrayInitIndex = nullptr;
2068
2069 /// The values of function arguments to use when evaluating
2070 /// CXXInheritedCtorInitExprs within this context.
2071 CallArgList CXXInheritedCtorInitExprArgs;
2072
2073 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2074 /// destructor, this will hold the implicit argument (e.g. VTT).
2075 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2076 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2077
2078 /// OutermostConditional - Points to the outermost active
2079 /// conditional control. This is used so that we know if a
2080 /// temporary should be destroyed conditionally.
2081 ConditionalEvaluation *OutermostConditional = nullptr;
2082
2083 /// The current lexical scope.
2084 LexicalScope *CurLexicalScope = nullptr;
2085
2086 /// The current source location that should be used for exception
2087 /// handling code.
2088 SourceLocation CurEHLocation;
2089
2090 /// BlockByrefInfos - For each __block variable, contains
2091 /// information about the layout of the variable.
2092 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2093
2094 /// Used by -fsanitize=nullability-return to determine whether the return
2095 /// value can be checked.
2096 llvm::Value *RetValNullabilityPrecondition = nullptr;
2097
2098 /// Check if -fsanitize=nullability-return instrumentation is required for
2099 /// this function.
2100 bool requiresReturnValueNullabilityCheck() const {
2101 return RetValNullabilityPrecondition;
2102 }
2103
2104 /// Used to store precise source locations for return statements by the
2105 /// runtime return value checks.
2106 Address ReturnLocation = Address::invalid();
2107
2108 /// Check if the return value of this function requires sanitization.
2109 bool requiresReturnValueCheck() const;
2110
2111 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2112 bool hasInAllocaArg(const CXXMethodDecl *MD);
2113
2114 llvm::BasicBlock *TerminateLandingPad = nullptr;
2115 llvm::BasicBlock *TerminateHandler = nullptr;
2117
2118 /// Terminate funclets keyed by parent funclet pad.
2119 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2120
2121 /// Largest vector width used in this function. Will be used to create a
2122 /// function attribute.
2123 unsigned LargestVectorWidth = 0;
2124
2125 /// True if we need emit the life-time markers. This is initially set in
2126 /// the constructor, but could be overwritten to true if this is a coroutine.
2127 bool ShouldEmitLifetimeMarkers;
2128
2129 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2130 /// the function metadata.
2131 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2132
2133public:
2134 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
2136
2137 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
2138 ASTContext &getContext() const { return CGM.getContext(); }
2140 if (DisableDebugInfo)
2141 return nullptr;
2142 return DebugInfo;
2143 }
2144 void disableDebugInfo() { DisableDebugInfo = true; }
2145 void enableDebugInfo() { DisableDebugInfo = false; }
2146
2148 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2149 }
2150
2151 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2152
2153 /// Returns a pointer to the function's exception object and selector slot,
2154 /// which is assigned in every landing pad.
2157
2158 /// Returns the contents of the function's exception object and selector
2159 /// slots.
2160 llvm::Value *getExceptionFromSlot();
2161 llvm::Value *getSelectorFromSlot();
2162
2164
2165 llvm::BasicBlock *getUnreachableBlock() {
2166 if (!UnreachableBlock) {
2167 UnreachableBlock = createBasicBlock("unreachable");
2168 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2169 }
2170 return UnreachableBlock;
2171 }
2172
2173 llvm::BasicBlock *getInvokeDest() {
2174 if (!EHStack.requiresLandingPad()) return nullptr;
2175 return getInvokeDestImpl();
2176 }
2177
2178 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2179
2180 const TargetInfo &getTarget() const { return Target; }
2181 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2183 return CGM.getTargetCodeGenInfo();
2184 }
2185
2186 //===--------------------------------------------------------------------===//
2187 // Cleanups
2188 //===--------------------------------------------------------------------===//
2189
2190 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2191
2192 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2193 Address arrayEndPointer,
2194 QualType elementType,
2195 CharUnits elementAlignment,
2196 Destroyer *destroyer);
2197 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2198 llvm::Value *arrayEnd,
2199 QualType elementType,
2200 CharUnits elementAlignment,
2201 Destroyer *destroyer);
2202
2204 Address addr, QualType type);
2206 Address addr, QualType type);
2208 Destroyer *destroyer, bool useEHCleanupForArray);
2210 Address addr, QualType type);
2212 QualType type, Destroyer *destroyer,
2213 bool useEHCleanupForArray);
2215 QualType type, Destroyer *destroyer,
2216 bool useEHCleanupForArray);
2217 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2218 llvm::Value *CompletePtr,
2219 QualType ElementType);
2222 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2224 bool useEHCleanupForArray);
2226 Destroyer *destroyer,
2227 bool useEHCleanupForArray,
2228 const VarDecl *VD);
2229 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2230 QualType elementType, CharUnits elementAlign,
2231 Destroyer *destroyer,
2232 bool checkZeroLength, bool useEHCleanup);
2233
2235
2236 /// Determines whether an EH cleanup is required to destroy a type
2237 /// with the given destruction kind.
2239 switch (kind) {
2240 case QualType::DK_none:
2241 return false;
2245 return getLangOpts().Exceptions;
2247 return getLangOpts().Exceptions &&
2248 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
2249 }
2250 llvm_unreachable("bad destruction kind");
2251 }
2252
2255 }
2256
2257 //===--------------------------------------------------------------------===//
2258 // Objective-C
2259 //===--------------------------------------------------------------------===//
2260
2262
2264
2265 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2267 const ObjCPropertyImplDecl *PID);
2269 const ObjCPropertyImplDecl *propImpl,
2270 const ObjCMethodDecl *GetterMethodDecl,
2271 llvm::Constant *AtomicHelperFn);
2272
2274 ObjCMethodDecl *MD, bool ctor);
2275
2276 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2277 /// for the given property.
2279 const ObjCPropertyImplDecl *PID);
2281 const ObjCPropertyImplDecl *propImpl,
2282 llvm::Constant *AtomicHelperFn);
2283
2284 //===--------------------------------------------------------------------===//
2285 // Block Bits
2286 //===--------------------------------------------------------------------===//
2287
2288 /// Emit block literal.
2289 /// \return an LLVM value which is a pointer to a struct which contains
2290 /// information about the block, including the block invoke function, the
2291 /// captured variables, etc.
2292 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2293
2295 const CGBlockInfo &Info,
2296 const DeclMapTy &ldm,
2297 bool IsLambdaConversionToBlock,
2298 bool BuildGlobalBlock);
2299
2300 /// Check if \p T is a C++ class that has a destructor that can throw.
2302
2303 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2304 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2306 const ObjCPropertyImplDecl *PID);
2308 const ObjCPropertyImplDecl *PID);
2309 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2310
2311 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2312 bool CanThrow);
2313
2314 class AutoVarEmission;
2315
2317
2318 /// Enter a cleanup to destroy a __block variable. Note that this
2319 /// cleanup should be a no-op if the variable hasn't left the stack
2320 /// yet; if a cleanup is required for the variable itself, that needs
2321 /// to be done externally.
2322 ///
2323 /// \param Kind Cleanup kind.
2324 ///
2325 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2326 /// structure that will be passed to _Block_object_dispose. When
2327 /// \p LoadBlockVarAddr is true, the address of the field of the block
2328 /// structure that holds the address of the __block structure.
2329 ///
2330 /// \param Flags The flag that will be passed to _Block_object_dispose.
2331 ///
2332 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2333 /// \p Addr to get the address of the __block structure.
2335 bool LoadBlockVarAddr, bool CanThrow);
2336
2337 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2338 llvm::Value *ptr);
2339
2342
2343 /// BuildBlockByrefAddress - Computes the location of the
2344 /// data in a variable which is declared as __block.
2346 bool followForward = true);
2348 const BlockByrefInfo &info,
2349 bool followForward,
2350 const llvm::Twine &name);
2351
2353
2355
2356 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2357 const CGFunctionInfo &FnInfo);
2358
2359 /// Annotate the function with an attribute that disables TSan checking at
2360 /// runtime.
2361 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2362
2363 /// Emit code for the start of a function.
2364 /// \param Loc The location to be associated with the function.
2365 /// \param StartLoc The location of the function body.
2367 QualType RetTy,
2368 llvm::Function *Fn,
2369 const CGFunctionInfo &FnInfo,
2370 const FunctionArgList &Args,
2372 SourceLocation StartLoc = SourceLocation());
2373
2375
2379 void EmitFunctionBody(const Stmt *Body);
2380 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2381
2382 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2383 CallArgList &CallArgs,
2384 const CGFunctionInfo *CallOpFnInfo = nullptr,
2385 llvm::Constant *CallOpFn = nullptr);
2389 CallArgList &CallArgs);
2391 const CGFunctionInfo **ImplFnInfo,
2392 llvm::Function **ImplFn);
2395 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2396 }
2397 void EmitAsanPrologueOrEpilogue(bool Prologue);
2398
2399 /// Emit the unified return block, trying to avoid its emission when
2400 /// possible.
2401 /// \return The debug location of the user written return statement if the
2402 /// return block is avoided.
2403 llvm::DebugLoc EmitReturnBlock();
2404
2405 /// FinishFunction - Complete IR generation of the current function. It is
2406 /// legal to call this function even if there is no current insertion point.
2408
2409 void StartThunk(llvm::Function *Fn, GlobalDecl GD,
2410 const CGFunctionInfo &FnInfo, bool IsUnprototyped);
2411
2412 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
2413 const ThunkInfo *Thunk, bool IsUnprototyped);
2414
2416
2417 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2418 void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
2419 llvm::FunctionCallee Callee);
2420
2421 /// Generate a thunk for the given method.
2422 void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
2423 GlobalDecl GD, const ThunkInfo &Thunk,
2424 bool IsUnprototyped);
2425
2426 llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
2427 const CGFunctionInfo &FnInfo,
2428 GlobalDecl GD, const ThunkInfo &Thunk);
2429
2431 FunctionArgList &Args);
2432
2434
2435 /// Struct with all information about dynamic [sub]class needed to set vptr.
2436 struct VPtr {
2441 };
2442
2443 /// Initialize the vtable pointer of the given subobject.
2445
2447
2450
2452 CharUnits OffsetFromNearestVBase,
2453 bool BaseIsNonVirtualPrimaryBase,
2454 const CXXRecordDecl *VTableClass,
2455 VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
2456
2458
2459 // VTableAuthMode - whether we guarantee that loading the
2460 // vtable will trap on authentication failure,
2461 // even if the resulting vtable pointer is unused.
2462 enum class VTableAuthMode {
2464 MustTrap,
2465 UnsafeUbsanStrip // Should only be used for Vptr UBSan check
2466 };
2467 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2468 /// to by This.
2469 llvm::Value *
2470 GetVTablePtr(Address This, llvm::Type *VTableTy,
2471 const CXXRecordDecl *VTableClass,
2473
2482 };
2483
2484 /// Derived is the presumed address of an object of type T after a
2485 /// cast. If T is a polymorphic class type, emit a check that the virtual
2486 /// table for Derived belongs to a class derived from T.
2487 void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
2489
2490 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2491 /// If vptr CFI is enabled, emit a check that VTable is valid.
2492 void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
2494
2495 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2496 /// RD using llvm.type.test.
2497 void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
2499
2500 /// If whole-program virtual table optimization is enabled, emit an assumption
2501 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2502 /// enabled, emit a check that VTable is a member of RD's type identifier.
2504 llvm::Value *VTable, SourceLocation Loc);
2505
2506 /// Returns whether we should perform a type checked load when loading a
2507 /// virtual function for virtual calls to members of RD. This is generally
2508 /// true when both vcall CFI and whole-program-vtables are enabled.
2510
2511 /// Emit a type checked load from the given vtable.
2513 llvm::Value *VTable,
2514 llvm::Type *VTableTy,
2515 uint64_t VTableByteOffset);
2516
2517 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2518 /// given phase of destruction for a destructor. The end result
2519 /// should call destructors on members and base classes in reverse
2520 /// order of their construction.
2522
2523 /// ShouldInstrumentFunction - Return true if the current function should be
2524 /// instrumented with __cyg_profile_func_* calls
2526
2527 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2528 /// should not be instrumented with sanitizers.
2530
2531 /// ShouldXRayInstrument - Return true if the current function should be
2532 /// instrumented with XRay nop sleds.
2534
2535 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2536 /// XRay custom event handling calls.
2538
2539 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2540 /// XRay typed event handling calls.
2542
2543 /// Return a type hash constant for a function instrumented by
2544 /// -fsanitize=function.
2545 llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
2546
2547 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2548 /// arguments for the given function. This is also responsible for naming the
2549 /// LLVM function arguments.
2551 llvm::Function *Fn,
2552 const FunctionArgList &Args);
2553
2554 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2555 /// given temporary.
2556 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2557 SourceLocation EndLoc);
2558
2559 /// Emit a test that checks if the return value \p RV is nonnull.
2560 void EmitReturnValueCheck(llvm::Value *RV);
2561
2562 /// EmitStartEHSpec - Emit the start of the exception spec.
2563 void EmitStartEHSpec(const Decl *D);
2564
2565 /// EmitEndEHSpec - Emit the end of the exception spec.
2566 void EmitEndEHSpec(const Decl *D);
2567
2568 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2569 llvm::BasicBlock *getTerminateLandingPad();
2570
2571 /// getTerminateFunclet - Return a cleanup funclet that just calls
2572 /// terminate.
2573 llvm::BasicBlock *getTerminateFunclet();
2574
2575 /// getTerminateHandler - Return a handler (not a landing pad, just
2576 /// a catch handler) that just calls terminate. This is used when
2577 /// a terminate scope encloses a try.
2578 llvm::BasicBlock *getTerminateHandler();
2579
2581 llvm::Type *ConvertType(QualType T);
2583 llvm::Type *LLVMTy = nullptr);
2584 llvm::Type *ConvertType(const TypeDecl *T) {
2585 return ConvertType(getContext().getTypeDeclType(T));
2586 }
2587
2588 /// LoadObjCSelf - Load the value of self. This function is only valid while
2589 /// generating code for an Objective-C method.
2590 llvm::Value *LoadObjCSelf();
2591
2592 /// TypeOfSelfObject - Return type of object that this self represents.
2594
2595 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2597
2599 return getEvaluationKind(T) == TEK_Scalar;
2600 }
2601
2604 }
2605
2606 /// createBasicBlock - Create an LLVM basic block.
2607 llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2608 llvm::Function *parent = nullptr,
2609 llvm::BasicBlock *before = nullptr) {
2610 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2611 }
2612
2613 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2614 /// label maps to.
2616
2617 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2618 /// another basic block, simplify it. This assumes that no other code could
2619 /// potentially reference the basic block.
2620 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2621
2622 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2623 /// adding a fall-through branch from the current insert block if
2624 /// necessary. It is legal to call this function even if there is no current
2625 /// insertion point.
2626 ///
2627 /// IsFinished - If true, indicates that the caller has finished emitting
2628 /// branches to the given block and does not expect to emit code into it. This
2629 /// means the block can be ignored if it is unreachable.
2630 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2631
2632 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2633 /// near its uses, and leave the insertion point in it.
2634 void EmitBlockAfterUses(llvm::BasicBlock *BB);
2635
2636 /// EmitBranch - Emit a branch to the specified basic block from the current
2637 /// insert block, taking care to avoid creation of branches from dummy
2638 /// blocks. It is legal to call this function even if there is no current
2639 /// insertion point.
2640 ///
2641 /// This function clears the current insertion point. The caller should follow
2642 /// calls to this function with calls to Emit*Block prior to generating new
2643 /// code.
2644 void EmitBranch(llvm::BasicBlock *Block);
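  // Editor's note: an illustrative sketch (not part of this header) of how the
  // block helpers above are typically combined when lowering a simple two-way
  // branch, assuming a CodeGenFunction &CGF and an already-emitted i1 Cond:
  //   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
  //   llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
  //   CGF.Builder.CreateCondBr(Cond, ThenBB, ContBB);
  //   CGF.EmitBlock(ThenBB);
  //   // ... emit the 'then' body ...
  //   CGF.EmitBranch(ContBB);   // clears the current insertion point
  //   CGF.EmitBlock(ContBB);    // continue emitting at the merge point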
2645
2646 /// HaveInsertPoint - True if an insertion point is defined. If not, this
2647 /// indicates that the current code being emitted is unreachable.
2648 bool HaveInsertPoint() const {
2649 return Builder.GetInsertBlock() != nullptr;
2650 }
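  // Editor's note: this guard appears throughout CodeGen (see, for example,
  // the OMPAllocateCleanupTy::Emit helper earlier in this file); emitters
  // typically bail out early when no insertion point is set:
  //   if (!CGF.HaveInsertPoint())
  //     return;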
2651
2652 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2653 /// emitted IR has a place to go. Note that by definition, if this function
2654 /// creates a block then that block is unreachable; callers may do better to
2655 /// detect when no insertion point is defined and simply skip IR generation.
2657 if (!HaveInsertPoint())
2659 }
2660
2661 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2662 /// specified stmt yet.
2663 void ErrorUnsupported(const Stmt *S, const char *Type);
2664
2665 //===--------------------------------------------------------------------===//
2666 // Helpers
2667 //===--------------------------------------------------------------------===//
2668
2670 llvm::BasicBlock *LHSBlock,
2671 llvm::BasicBlock *RHSBlock,
2672 llvm::BasicBlock *MergeBlock,
2673 QualType MergedType) {
2674 Builder.SetInsertPoint(MergeBlock);
2675 llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
2676 PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
2677 PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
2678 LHS.replaceBasePointer(PtrPhi);
2679 LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
2680 return LHS;
2681 }
2682
2683 /// Construct an address with the natural alignment of T. If a pointer to T
2684 /// is expected to be signed, the pointer passed to this function must have
2685 /// been signed, and the returned Address will have the pointer authentication
2686 /// information needed to authenticate the signed pointer.
2688 llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
2689 bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
2690 TBAAAccessInfo *TBAAInfo = nullptr,
2691 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
2692 if (Alignment.isZero())
2693 Alignment =
2694 CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
2695 return Address(Ptr, ConvertTypeForMem(T), Alignment,
2696 CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
2697 IsKnownNonNull);
2698 }
2699
2702 return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
2704 }
2705
2707 TBAAAccessInfo TBAAInfo) {
2708 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2709 }
2710
2711 LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2713 return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
2715 }
2716
2717 /// Same as MakeAddrLValue above except that the pointer is known to be
2718 /// unsigned.
2719 LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2721 Address Addr(V, ConvertTypeForMem(T), Alignment);
2722 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2724 }
2725
2726 LValue
2729 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2730 TBAAAccessInfo());
2731 }
2732
2733 /// Given a value of type T* that may not be to a complete object, construct
2734 /// an l-value with the natural pointee alignment of T.
2736
2737 LValue
2739 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
2740
2741 /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
2742 /// to be unsigned.
2744
2746
2748 LValueBaseInfo *PointeeBaseInfo = nullptr,
2749 TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2752 AlignmentSource Source =
2754 LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2755 CGM.getTBAAAccessInfo(RefTy));
2756 return EmitLoadOfReferenceLValue(RefLVal);
2757 }
2758
2759 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2760 /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
2761 /// it is loaded from.
2763 LValueBaseInfo *BaseInfo = nullptr,
2764 TBAAAccessInfo *TBAAInfo = nullptr);
2766
2767private:
2768 struct AllocaTracker {
2769 void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
2770 llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
2771
2772 private:
2774 };
2775 AllocaTracker *Allocas = nullptr;
2776
2777public:
2778 // Captures all the allocas created during the scope of its RAII object.
2781 : CGF(CGF), OldTracker(CGF.Allocas) {
2782 CGF.Allocas = &Tracker;
2783 }
2784 ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
2785
2786 llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
2787
2788 private:
2789 CodeGenFunction &CGF;
2790 AllocaTracker *OldTracker;
2791 AllocaTracker Tracker;
2792 };
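  // Editor's note: an illustrative sketch (not from the original header) of
  // using the RAII tracker above to collect the allocas created while a
  // sub-expression is emitted, assuming a CodeGenFunction &CGF in scope:
  //   llvm::SmallVector<llvm::AllocaInst *> Allocas;
  //   {
  //     CodeGenFunction::AllocaTrackerRAII Tracker(CGF);
  //     // ... emit code that may create temporaries ...
  //     Allocas = Tracker.Take();
  //   }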
2793
2794 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2795 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2796 /// insertion point of the builder. The caller is responsible for setting an
2797 /// appropriate alignment on
2798 /// the alloca.
2799 ///
2800 /// \p ArraySize is the number of array elements to be allocated if it
2801 /// is not nullptr.
2802 ///
2803 /// LangAS::Default is the address space of pointers to local variables and
2804 /// temporaries, as exposed in the source language. In certain
2805 /// configurations, this is not the same as the alloca address space, and a
2806 /// cast is needed to lift the pointer from the alloca AS into
2807 /// LangAS::Default. This can happen when the target uses a restricted
2808 /// address space for the stack but the source language requires
2809 /// LangAS::Default to be a generic address space. The latter condition is
2810 /// common for most programming languages; OpenCL is an exception in that
2811 /// LangAS::Default is the private address space, which naturally maps
2812 /// to the stack.
2813 ///
2814 /// Because the address of a temporary is often exposed to the program in
2815 /// various ways, this function will perform the cast. The original alloca
2816 /// instruction is returned through \p Alloca if it is not nullptr.
2817 ///
2818 /// The cast is not performed in CreateTempAllocaWithoutCast. This is
2819 /// more efficient if the caller knows that the address will not be exposed.
2820 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2821 llvm::Value *ArraySize = nullptr);
2823 const Twine &Name = "tmp",
2824 llvm::Value *ArraySize = nullptr,
2825 RawAddress *Alloca = nullptr);
2827 const Twine &Name = "tmp",
2828 llvm::Value *ArraySize = nullptr);
2829
2830 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2831 /// default ABI alignment of the given LLVM type.
2832 ///
2833 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2834 /// any given AST type that happens to have been lowered to the
2835 /// given IR type. This should only ever be used for function-local,
2836 /// IR-driven manipulations like saving and restoring a value. Do
2837 /// not hand this address off to arbitrary IRGen routines, and especially
2838 /// do not pass it as an argument to a function that might expect a
2839 /// properly ABI-aligned value.
2841 const Twine &Name = "tmp");
2842
2843 /// CreateIRTemp - Create a temporary IR object of the given type, with
2844 /// appropriate alignment. This routine should only be used when a temporary
2845 /// value needs to be stored into an alloca (for example, to avoid explicit
2846 /// PHI construction), but the type is the IR type, not the type appropriate
2847 /// for storing in memory.
2848 ///
2849 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2850 /// ConvertType instead of ConvertTypeForMem.
2851 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2852
2853 /// CreateMemTemp - Create a temporary memory object of the given type, with
2854 /// appropriate alignment and cast it to the default address space. Returns
2855 /// the original alloca instruction by \p Alloca if it is not nullptr.
2856 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2857 RawAddress *Alloca = nullptr);
2859 const Twine &Name = "tmp",
2860 RawAddress *Alloca = nullptr);
2861
2862 /// CreateMemTemp - Create a temporary memory object of the given type, with
2863 /// appropriate alignment without casting it to the default address space.
2864 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2866 const Twine &Name = "tmp");
2867
2868 /// CreateAggTemp - Create a temporary memory object for the given
2869 /// aggregate type.
2870 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2871 RawAddress *Alloca = nullptr) {
2872 return AggValueSlot::forAddr(
2873 CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
2876 }
2877
2878 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2879 /// expression and compare the result against zero, returning an Int1Ty value.
2880 llvm::Value *EvaluateExprAsBool(const Expr *E);
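  // Editor's note: a minimal, illustrative use of EvaluateExprAsBool when an
  // i1 condition is needed for a conditional branch (CondExpr, TrueBB, and
  // FalseBB below are hypothetical names):
  //   llvm::Value *Cond = CGF.EvaluateExprAsBool(CondExpr);
  //   CGF.Builder.CreateCondBr(Cond, TrueBB, FalseBB);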
2881
2882 /// Retrieve the implicit cast expression of the rhs in a binary operator
2883 /// expression by passing pointers to Value and QualType.
2884 /// This is used for implicit bitfield conversion checks, which
2885 /// must compare with the value before potential truncation.
2887 llvm::Value **Previous,
2888 QualType *SrcType);
2889
2890 /// Emit a check for an [implicit] conversion of a bitfield. It is not UB,
2891 /// so we use the value after conversion.
2892 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2893 llvm::Value *Dst, QualType DstType,
2894 const CGBitFieldInfo &Info,
2896
2897 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2898 void EmitIgnoredExpr(const Expr *E);
2899
2900 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2901 /// any type. The result is returned as an RValue struct. If this is an
2902 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2903 /// the result should be returned.
2904 ///
2905 /// \param ignoreResult True if the resulting value isn't used.
2908 bool ignoreResult = false);
2909
2910 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2911 // or the value of the expression, depending on how va_list is defined.
2913
2914 /// Emit a "reference" to a __builtin_ms_va_list; this is
2915 /// always the value of the expression, because a __builtin_ms_va_list is a
2916 /// pointer to a char.
2918
2919 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2920 /// always be accessible even if no aggregate location is provided.
2922
2923 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2924 /// arbitrary expression into the given memory location.
2925 void EmitAnyExprToMem(const Expr *E, Address Location,
2926 Qualifiers Quals, bool IsInitializer);
2927
2928 void EmitAnyExprToExn(const Expr *E, Address Addr);
2929
2930 /// EmitExprAsInit - Emits the code necessary to initialize a
2931 /// location in memory with the given initializer.
2932 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2933 bool capturedByInit);
2934
2935 /// hasVolatileMember - returns true if the aggregate type has a volatile
2936 /// member.
2938 if (const RecordType *RT = T->getAs<RecordType>()) {
2939 const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2940 return RD->hasVolatileMember();
2941 }
2942 return false;
2943 }
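  // Editor's note: for illustration (not from the original header), given
  //   struct S { volatile int X; };
  // hasVolatileMember() returns true for the type S, which
  // EmitAggregateAssign (below) uses to request a volatile aggregate copy.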
2944
2945 /// Determine whether a return value slot may overlap some other object.
2947 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2948 // class subobjects. These cases may need to be revisited depending on the
2949 // resolution of the relevant core issue.
2951 }
2952
2953 /// Determine whether a field initialization may overlap some other object.
2955
2956 /// Determine whether a base class initialization may overlap some other
2957 /// object.
2959 const CXXRecordDecl *BaseRD,
2960 bool IsVirtual);
2961
2962 /// Emit an aggregate assignment.
2964 bool IsVolatile = hasVolatileMember(EltTy);
2965 EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2966 }
2967
2969 AggValueSlot::Overlap_t MayOverlap) {
2970 EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2971 }
2972
2973 /// EmitAggregateCopy - Emit an aggregate copy.
2974 ///
2975 /// \param isVolatile \c true iff either the source or the destination is
2976 /// volatile.
2977 /// \param MayOverlap Whether the tail padding of the destination might be
2978 /// occupied by some other object. More efficient code can often be
2979 /// generated if not.
2981 AggValueSlot::Overlap_t MayOverlap,
2982 bool isVolatile = false);
2983
2984 /// GetAddrOfLocalVar - Return the address of a local variable.
2986 auto it = LocalDeclMap.find(VD);
2987 assert(it != LocalDeclMap.end() &&
2988 "Invalid argument to GetAddrOfLocalVar(), no decl!");
2989 return it->second;
2990 }
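  // Editor's note: an illustrative sketch of reloading a previously emitted
  // local variable or parameter (VD is a hypothetical const VarDecl * that
  // has already been added to LocalDeclMap):
  //   Address Addr = CGF.GetAddrOfLocalVar(VD);
  //   llvm::Value *Val = CGF.Builder.CreateLoad(Addr, "tmp");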
2991
2992 /// Given an opaque value expression, return its LValue mapping if it exists,
2993 /// otherwise create one.
2995
2996 /// Given an opaque value expression, return its RValue mapping if it exists,
2997 /// otherwise create one.
2999
3000 /// Get the index of the current ArrayInitLoopExpr, if any.
3001 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3002
3003 /// getAccessedFieldNo - Given an encoded value and a result number, return
3004 /// the input field number being accessed.
3005 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3006
3007 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3008 llvm::BasicBlock *GetIndirectGotoBlock();
3009
3010 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3011 static bool IsWrappedCXXThis(const Expr *E);
3012
3013 /// EmitNullInitialization - Generate code to set a value of the given type to
3014 /// null. If the type contains data member pointers, they will be initialized
3015 /// to -1 in accordance with the Itanium C++ ABI.
3017
3018 /// Emits a call to an LLVM variable-argument intrinsic, either
3019 /// \c llvm.va_start or \c llvm.va_end.
3020 /// \param ArgValue A reference to the \c va_list as emitted by either
3021 /// \c EmitVAListRef or \c EmitMSVAListRef.
3022 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3023 /// calls \c llvm.va_end.
3024 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3025
3026 /// Generate code to get an argument from the passed in pointer
3027 /// and update it accordingly.
3028 /// \param VE The \c VAArgExpr for which to generate code.
3029 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3030 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3031 /// \returns A pointer to the argument.
3032 // FIXME: We should be able to get rid of this method and use the va_arg
3033 // instruction in LLVM instead once it works well enough.
3036
3037 /// emitArrayLength - Compute the length of an array, even if it's a
3038 /// VLA, and drill down to the base element type.
3040 QualType &baseType,
3041 Address &addr);
3042
3043 /// EmitVLASize - Capture all the sizes for the VLA expressions in
3044 /// the given variably-modified type and store them in the VLASizeMap.
3045 ///
3046 /// This function can be called with a null (unreachable) insert point.
3048
3050 llvm::Value *NumElts;
3052
3053 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3054 };
3055
3056 /// Return the number of elements for a single dimension
3057 /// for the given array type.
3060
3061 /// Returns an LLVM value that corresponds to the size,
3062 /// in non-variably-sized elements, of a variable length array type,
3063 /// plus that largest non-variably-sized element type. Assumes that
3064 /// the type has already been emitted with EmitVariablyModifiedType.
3067
3068 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
3069 /// generating code for a C++ member function.
3070 llvm::Value *LoadCXXThis() {
3071 assert(CXXThisValue && "no 'this' value for this function");
3072 return CXXThisValue;
3073 }
3075
3076 /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors that
3077 /// have virtual bases.
3078 // FIXME: Every place that calls LoadCXXVTT is something
3079 // that needs to be abstracted properly.
3080 llvm::Value *LoadCXXVTT() {
3081 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3082 return CXXStructorImplicitParamValue;
3083 }
3084
3085 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
3086 /// complete class to the given direct base.
3087 Address
3089 const CXXRecordDecl *Derived,
3090 const CXXRecordDecl *Base,
3091 bool BaseIsVirtual);
3092
3093 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3094
3095 /// GetAddressOfBaseClass - This function will add the necessary delta to the
3096 /// load of 'this' and returns address of the base class.
3098 const CXXRecordDecl *Derived,
3101 bool NullCheckValue, SourceLocation Loc);
3102
3104 const CXXRecordDecl *Derived,
3107 bool NullCheckValue);
3108
3109 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3110 /// base constructor/destructor with virtual bases.
3111 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3112 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3113 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3114 bool Delegating);
3115
3117 CXXCtorType CtorType,
3118 const FunctionArgList &Args,
3120 // It's important not to confuse this and the previous function. Delegating
3121 // constructors are the C++0x feature. The constructor delegate optimization
3122 // is used to reduce duplication in the base and complete constructors where
3123 // they are substantially the same.
3125 const FunctionArgList &Args);
3126
3127 /// Emit a call to an inheriting constructor (that is, one that invokes a
3128 /// constructor inherited from a base class) by inlining its definition. This
3129 /// is necessary if the ABI does not support forwarding the arguments to the
3130 /// base class constructor (because they're variadic or similar).
3132 CXXCtorType CtorType,
3133 bool ForVirtualBase,
3134 bool Delegating,
3135 CallArgList &Args);
3136
3137 /// Emit a call to a constructor inherited from a base class, passing the
3138 /// current constructor's arguments along unmodified (without even making
3139 /// a copy).
3141 bool ForVirtualBase, Address This,
3142 bool InheritedFromVBase,
3144
3146 bool ForVirtualBase, bool Delegating,
3147 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3148
3150 bool ForVirtualBase, bool Delegating,
3151 Address This, CallArgList &Args,
3153 SourceLocation Loc, bool NewPointerIsChecked);
3154
3155 /// Emit assumption load for all bases. Must be called only on the
3156 /// most-derived class and not while the object is under construction.
3158
3159 /// Emit assumption that vptr load == global vtable.
3160 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3161
3163 Address This, Address Src,
3164 const CXXConstructExpr *E);
3165
3167 const ArrayType *ArrayTy,
3168 Address ArrayPtr,
3169 const CXXConstructExpr *E,
3170 bool NewPointerIsChecked,
3171 bool ZeroInitialization = false);
3172
3174 llvm::Value *NumElements,
3175 Address ArrayPtr,
3176 const CXXConstructExpr *E,
3177 bool NewPointerIsChecked,
3178 bool ZeroInitialization = false);
3179
3181
3183 bool ForVirtualBase, bool Delegating, Address This,
3184 QualType ThisTy);
3185
3187 llvm::Type *ElementTy, Address NewPtr,
3188 llvm::Value *NumElements,
3189 llvm::Value *AllocSizeWithoutCookie);
3190
3191 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3192 Address Ptr);
3193
3198
3199 llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
3200 void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
3201
3202 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3204
3205 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3206 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3207 CharUnits CookieSize = CharUnits());
3208
3210 const CallExpr *TheCallExpr, bool IsDelete);
3211
3212 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3213 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3215
3216 /// Situations in which we might emit a check for the suitability of a
3217 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3218 /// compiler-rt.
3220 /// Checking the operand of a load. Must be suitably sized and aligned.
3222 /// Checking the destination of a store. Must be suitably sized and aligned.
3224 /// Checking the bound value in a reference binding. Must be suitably sized
3225 /// and aligned, but is not required to refer to an object (until the
3226 /// reference is used), per core issue 453.
3228 /// Checking the object expression in a non-static data member access. Must
3229 /// be an object within its lifetime.
3231 /// Checking the 'this' pointer for a call to a non-static member function.
3232 /// Must be an object within its lifetime.
3234 /// Checking the 'this' pointer for a constructor call.
3236 /// Checking the operand of a static_cast to a derived pointer type. Must be
3237 /// null or an object within its lifetime.
3239 /// Checking the operand of a static_cast to a derived reference type. Must
3240 /// be an object within its lifetime.
3242 /// Checking the operand of a cast to a base object. Must be suitably sized
3243 /// and aligned.
3245 /// Checking the operand of a cast to a virtual base object. Must be an
3246 /// object within its lifetime.
3248 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3250 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3251 /// null or an object within its lifetime.
3254
3255 /// Determine whether the pointer type check \p TCK permits null pointers.
3257
3258 /// Determine whether the pointer type check \p TCK requires a vptr check.
3260
3261 /// Whether any type-checking sanitizers are enabled. If \c false,
3262 /// calls to EmitTypeCheck can be skipped.
3264
3266 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3267 llvm::Value *ArraySize = nullptr) {
3269 return;
3270 EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
3271 SkippedChecks, ArraySize);
3272 }
3273
3275 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3276 SanitizerSet SkippedChecks = SanitizerSet(),
3277 llvm::Value *ArraySize = nullptr) {
3279 return;
3280 EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
3281 SkippedChecks, ArraySize);
3282 }
3283
3284 /// Emit a check that \p V is the address of storage of the
3285 /// appropriate size and alignment for an object of type \p Type
3286 /// (or if ArraySize is provided, for an array of that bound).
3288 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3289 SanitizerSet SkippedChecks = SanitizerSet(),
3290 llvm::Value *ArraySize = nullptr);
3291
3292 /// Emit a check that \p Base points into an array object, which
3293 /// we can access at index \p Index. \p Accessed should be \c false if we
3294 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3295 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3296 QualType IndexType, bool Accessed);
3297 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3298 llvm::Value *Index, QualType IndexType,
3299 QualType IndexedType, bool Accessed);
3300
3301 // Find a struct's flexible array member and get its offset. It may be
3302 // embedded inside multiple sub-structs, but must still be the last field.
3303 const FieldDecl *
3305 const FieldDecl *FAMDecl,
3306 uint64_t &Offset);
3307
3308 /// Build an expression accessing the "counted_by" field.
3310 const FieldDecl *FAMDecl,
3311 const FieldDecl *CountDecl);
3312
3314 bool isInc, bool isPre);
3316 bool isInc, bool isPre);
3317
3318 /// Converts Location to a DebugLoc, if debug information is enabled.
3319 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3320
3321 /// Get the record field index as represented in debug info.
3322 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3323
3324
3325 //===--------------------------------------------------------------------===//
3326 // Declaration Emission
3327 //===--------------------------------------------------------------------===//
3328
3329 /// EmitDecl - Emit a declaration.
3330 ///
3331 /// This function can be called with a null (unreachable) insert point.
3332 void EmitDecl(const Decl &D);
3333
3334 /// EmitVarDecl - Emit a local variable declaration.
3335 ///
3336 /// This function can be called with a null (unreachable) insert point.
3337 void EmitVarDecl(const VarDecl &D);
3338
3339 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3340 bool capturedByInit);
3341
3343 llvm::Value *Address);
3344
3345 /// Determine whether the given initializer is trivial in the sense
3346 /// that it requires no code to be generated.
3348
3349 /// EmitAutoVarDecl - Emit an auto variable declaration.
3350 ///
3351 /// This function can be called with a null (unreachable) insert point.
3353
3355 friend class CodeGenFunction;
3356
3357 const VarDecl *Variable;
3358
3359 /// The address of the alloca for languages with explicit address space
3360 /// (e.g. OpenCL) or the alloca cast to a generic pointer for address space
3361 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3362 /// as a global constant.
3363 Address Addr;
3364
3365 llvm::Value *NRVOFlag;
3366
3367 /// True if the variable is a __block variable that is captured by an
3368 /// escaping block.
3369 bool IsEscapingByRef;
3370
3371 /// True if the variable is of aggregate type and has a constant
3372 /// initializer.
3373 bool IsConstantAggregate;
3374
3375 /// Non-null if we should use lifetime annotations.
3376 llvm::Value *SizeForLifetimeMarkers;
3377
3378 /// Address with original alloca instruction. Invalid if the variable was
3379 /// emitted as a global constant.
3380 RawAddress AllocaAddr;
3381
3382 struct Invalid {};
3383 AutoVarEmission(Invalid)
3384 : Variable(nullptr), Addr(Address::invalid()),
3385 AllocaAddr(RawAddress::invalid()) {}
3386
3387 AutoVarEmission(const VarDecl &variable)
3388 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3389 IsEscapingByRef(false), IsConstantAggregate(false),
3390 SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
3391
3392 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3393
3394 public:
3395 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3396
3397 bool useLifetimeMarkers() const {
3398 return SizeForLifetimeMarkers != nullptr;
3399 }
3400 llvm::Value *getSizeForLifetimeMarkers() const {
3401 assert(useLifetimeMarkers());
3402 return SizeForLifetimeMarkers;
3403 }
3404
3405 /// Returns the raw, allocated address, which is not necessarily
3406 /// the address of the object itself. It is cast to the default
3407 /// address space for address space agnostic languages.
3409 return Addr;
3410 }
3411
3412 /// Returns the address for the original alloca instruction.
3413 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3414
3415 /// Returns the address of the object within this declaration.
3416 /// Note that this does not chase the forwarding pointer for
3417 /// __block decls.
3419 if (!IsEscapingByRef) return Addr;
3420
3421 return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
3422 }
3423 };
3425 void EmitAutoVarInit(const AutoVarEmission &emission);
3428 QualType::DestructionKind dtorKind);
3429
3430 /// Emits the alloca and debug information for the size expressions for each
3431 /// dimension of an array. It registers the association of its (1-dimensional)
3432 /// QualTypes and size expression's debug node, so that CGDebugInfo can
3433 /// reference this node when creating the DISubrange object to describe the
3434 /// array types.
3436 const VarDecl &D,
3437 bool EmitDebugInfo);
3438
3440 llvm::GlobalValue::LinkageTypes Linkage);
3441
3443 union {
3445 llvm::Value *Value;
3446 };
3447
3448 bool IsIndirect;
3449
3450 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3451 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3452
3453 public:
3454 static ParamValue forDirect(llvm::Value *value) {
3455 return ParamValue(value);
3456 }
3458 assert(!addr.getAlignment().isZero());
3459 return ParamValue(addr);
3460 }
3461
3462 bool isIndirect() const { return IsIndirect; }
3463 llvm::Value *getAnyValue() const {
3464 if (!isIndirect())
3465 return Value;
3466 assert(!Addr.hasOffset() && "unexpected offset");
3467 return Addr.getBasePointer();
3468 }
3469
3470 llvm::Value *getDirectValue() const {
3471 assert(!isIndirect());
3472 return Value;
3473 }
3474
3476 assert(isIndirect());
3477 return Addr;
3478 }
3479 };
3480
3481 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3482 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
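 /// Illustrative usage sketch (non-normative): forwarding one direct and one
 /// indirect (byval-style) argument from within a CodeGenFunction member.
 /// `P1`/`P2`, `V`, and `A` are assumed names; the indirect factory's exact
 /// signature is elided above and `forIndirect(Address)` is assumed here.
 /// \code
 ///   EmitParmDecl(*P1, ParamValue::forDirect(V), /*ArgNo=*/1);
 ///   EmitParmDecl(*P2, ParamValue::forIndirect(A), /*ArgNo=*/2);
 /// \endcode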
3483
3484 /// protectFromPeepholes - Protect a value that we're intending to
3485 /// store to the side, but which will probably be used later, from
3486 /// aggressive peepholing optimizations that might delete it.
3487 ///
3488 /// Pass the result to unprotectFromPeepholes to declare that
3489 /// protection is no longer required.
3490 ///
3491 /// There's no particular reason why this shouldn't apply to
3492 /// l-values, it's just that no existing peepholes work on pointers.
3495
3496 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3498 SourceLocation AssumptionLoc,
3499 llvm::Value *Alignment,
3500 llvm::Value *OffsetValue,
3501 llvm::Value *TheCheck,
3502 llvm::Instruction *Assumption);
3503
3504 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3505 SourceLocation Loc, SourceLocation AssumptionLoc,
3506 llvm::Value *Alignment,
3507 llvm::Value *OffsetValue = nullptr);
3508
3509 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3510 SourceLocation AssumptionLoc,
3511 llvm::Value *Alignment,
3512 llvm::Value *OffsetValue = nullptr);
3513
3514 //===--------------------------------------------------------------------===//
3515 // Statement Emission
3516 //===--------------------------------------------------------------------===//
3517
3518 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3519 void EmitStopPoint(const Stmt *S);
3520
3521 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3522 /// this function even if there is no current insertion point.
3523 ///
3524 /// This function may clear the current insertion point; callers should use
3525 /// EnsureInsertPoint if they wish to subsequently generate code without first
3526 /// calling EmitBlock, EmitBranch, or EmitStmt.
3527 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = std::nullopt);
3528
3529 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3530 /// necessarily require an insertion point or debug information; typically
3531 /// because the statement amounts to a jump or a container of other
3532 /// statements.
3533 ///
3534 /// \return True if the statement was handled.
3536
3537 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3540 bool GetLast = false,
3541 AggValueSlot AVS =
3543
3544 /// EmitLabel - Emit the block for the given label. It is legal to call this
3545 /// function even if there is no current insertion point.
3546 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3547
3548 void EmitLabelStmt(const LabelStmt &S);
3550 void EmitGotoStmt(const GotoStmt &S);
3552 void EmitIfStmt(const IfStmt &S);
3553
3555 ArrayRef<const Attr *> Attrs = std::nullopt);
3556 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = std::nullopt);
3557 void EmitForStmt(const ForStmt &S,
3558 ArrayRef<const Attr *> Attrs = std::nullopt);
3560 void EmitDeclStmt(const DeclStmt &S);
3561 void EmitBreakStmt(const BreakStmt &S);
3567 void EmitAsmStmt(const AsmStmt &S);
3568
3574
3579 bool ignoreResult = false);
3583 bool ignoreResult = false);
3585 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3586
3587 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3588 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3589
3595 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3597
3599 llvm::Function *FinallyFunc);
3601 const Stmt *OutlinedStmt);
3602
3604 const SEHExceptStmt &Except);
3605
3607 const SEHFinallyStmt &Finally);
3608
3610 llvm::Value *ParentFP,
3611 llvm::Value *EntryEBP);
3612 llvm::Value *EmitSEHExceptionCode();
3613 llvm::Value *EmitSEHExceptionInfo();
3615
3616 /// Emit simple code for OpenMP directives in Simd-only mode.
3618
3619 /// Scan the outlined statement for captures from the parent function. For
3620 /// each capture, mark the capture as escaped and emit a call to
3621 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3623 bool IsFilter);
3624
3625 /// Recovers the address of a local in a parent function. ParentVar is the
3626 /// address of the variable used in the immediate parent function. It can
3627 /// either be an alloca or a call to llvm.localrecover if there are nested
3628 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3629 /// frame.
3631 Address ParentVar,
3632 llvm::Value *ParentFP);
3633
3635 ArrayRef<const Attr *> Attrs = std::nullopt);
3636
3637 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3639 CodeGenFunction &CGF;
3640
3641 public:
3643 bool HasCancel)
3644 : CGF(CGF) {
3645 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3646 }
3647 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3648 };
3649
3650 /// Returns the calculated size of the specified type.
3651 llvm::Value *getTypeSize(QualType Ty);
3659 SmallVectorImpl<llvm::Value *> &CapturedVars);
3660 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3662 /// Perform element by element copying of arrays with type \a
3663 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3664 /// generated by \a CopyGen.
3665 ///
3666 /// \param DestAddr Address of the destination array.
3667 /// \param SrcAddr Address of the source array.
3668 /// \param OriginalType Type of destination and source arrays.
3669 /// \param CopyGen Copying procedure that copies value of single array element
3670 /// to another single array element.
3672 Address DestAddr, Address SrcAddr, QualType OriginalType,
3673 const llvm::function_ref<void(Address, Address)> CopyGen);
3674 /// Emit proper copying of data from one variable to another.
3675 ///
3676 /// \param OriginalType Original type of the copied variables.
3677 /// \param DestAddr Destination address.
3678 /// \param SrcAddr Source address.
3679 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3680 /// type of the base array element).
3681 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3682 /// the base array element).
3683 /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
3684 /// DestVD.
3685 void EmitOMPCopy(QualType OriginalType,
3686 Address DestAddr, Address SrcAddr,
3687 const VarDecl *DestVD, const VarDecl *SrcVD,
3688 const Expr *Copy);
3689 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3690 /// \a X = \a E \a BO \a E.
3691 ///
3692 /// \param X Value to be updated.
3693 /// \param E Update value.
3694 /// \param BO Binary operation for update operation.
3695 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3696 /// expression, false otherwise.
3697 /// \param AO Atomic ordering of the generated atomic instructions.
3698 /// \param CommonGen Code generator for complex expressions that cannot be
3699 /// expressed through atomicrmw instruction.
3700 /// \returns <true, OldAtomicValue> if a simple 'atomicrmw' instruction was
3701 /// generated, <false, RValue::get(nullptr)> otherwise.
3702 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3703 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3704 llvm::AtomicOrdering AO, SourceLocation Loc,
3705 const llvm::function_ref<RValue(RValue)> CommonGen);
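 /// Illustrative usage sketch (non-normative): emitting `x = x + e`
 /// atomically, assuming `XLV` is the LValue for x, `ERV` the RValue for e,
 /// and `Loc` a source location; the lambda is the fallback generator used
 /// when no single 'atomicrmw' suffices.
 /// \code
 ///   std::pair<bool, RValue> Res = EmitOMPAtomicSimpleUpdateExpr(
 ///       XLV, ERV, BO_Add, /*IsXLHSInRHSPart=*/true,
 ///       llvm::AtomicOrdering::Monotonic, Loc,
 ///       [](RValue OldVal) { /* build the updated value from OldVal */
 ///         return OldVal; });
 ///   if (!Res.first) {
 ///     // 'atomicrmw' was not applicable; the CommonGen-based path was used.
 ///   }
 /// \endcode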
3707 OMPPrivateScope &PrivateScope);
3709 OMPPrivateScope &PrivateScope);
3711 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3712 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3713 CaptureDeviceAddrMap);
3715 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3716 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3717 CaptureDeviceAddrMap);
3718 /// Emit code for the copyin clause in directive \a D. The following code is
3719 /// generated at the start of outlined functions for directives:
3720 /// \code
3721 /// threadprivate_var1 = master_threadprivate_var1;
3722 /// operator=(threadprivate_var2, master_threadprivate_var2);
3723 /// ...
3724 /// __kmpc_barrier(&loc, global_tid);
3725 /// \endcode
3726 ///
3727 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3728 /// \returns true if at least one copyin variable is found, false otherwise.
3730 /// Emit initial code for lastprivate variables. If some variable is
3731 /// not also firstprivate, then the default initialization is used. Otherwise
3732 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3733 /// method.
3734 ///
3735 /// \param D Directive that may have 'lastprivate' clauses.
3736 /// \param PrivateScope Private scope for capturing lastprivate variables for
3737 /// proper codegen in internal captured statement.
3738 ///
3739 /// \returns true if there is at least one lastprivate variable, false
3740 /// otherwise.
3742 OMPPrivateScope &PrivateScope);
3743 /// Emit final copying of lastprivate values to original variables at
3744 /// the end of the worksharing or simd directive.
3745 ///
3746 /// \param D Directive that has at least one 'lastprivate' clause.
3747 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3748 /// it is the last iteration of the loop code in the associated directive, or to
3749 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3751 bool NoFinals,
3752 llvm::Value *IsLastIterCond = nullptr);
3753 /// Emit initial code for linear clauses.
3755 CodeGenFunction::OMPPrivateScope &PrivateScope);
3756 /// Emit final code for linear clauses.
3757 /// \param CondGen Optional conditional code for final part of codegen for
3758 /// linear clause.
3760 const OMPLoopDirective &D,
3761 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3762 /// Emit initial code for reduction variables. Creates reduction copies
3763 /// and initializes them with the values according to OpenMP standard.
3764 ///
3765 /// \param D Directive (possibly) with the 'reduction' clause.
3766 /// \param PrivateScope Private scope for capturing reduction variables for
3767 /// proper codegen in internal captured statement.
3768 ///
3770 OMPPrivateScope &PrivateScope,
3771 bool ForInscan = false);
3772 /// Emit final update of reduction values to original variables at
3773 /// the end of the directive.
3774 ///
3775 /// \param D Directive that has at least one 'reduction' clause.
3776 /// \param ReductionKind The kind of reduction to perform.
3778 const OpenMPDirectiveKind ReductionKind);
3779 /// Emit initial code for linear variables. Creates private copies
3780 /// and initializes them with the values according to OpenMP standard.
3781 ///
3782 /// \param D Directive (possibly) with the 'linear' clause.
3783 /// \return true if at least one linear variable is found that should be
3784 /// initialized with the value of the original variable, false otherwise.
3786
3787 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3788 llvm::Function * /*OutlinedFn*/,
3789 const OMPTaskDataTy & /*Data*/)>
3792 const OpenMPDirectiveKind CapturedRegion,
3793 const RegionCodeGenTy &BodyGen,
3794 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3801 explicit OMPTargetDataInfo() = default;
3804 unsigned NumberOfTargetItems)
3808 };
3810 const RegionCodeGenTy &BodyGen,
3811 OMPTargetDataInfo &InputInfo);
3814 CodeGenFunction &CGF,
3815 const CapturedStmt *CS,
3853 void
3856 void
3863 void
3879 void
3904
3905 /// Emit device code for the target directive.
3907 StringRef ParentName,
3908 const OMPTargetDirective &S);
3909 static void
3912 /// Emit device code for the target parallel for directive.
3914 CodeGenModule &CGM, StringRef ParentName,
3916 /// Emit device code for the target parallel for simd directive.
3918 CodeGenModule &CGM, StringRef ParentName,
3920 /// Emit device code for the target teams directive.
3921 static void
3923 const OMPTargetTeamsDirective &S);
3924 /// Emit device code for the target teams distribute directive.
3926 CodeGenModule &CGM, StringRef ParentName,
3928 /// Emit device code for the target teams distribute simd directive.
3930 CodeGenModule &CGM, StringRef ParentName,
3932 /// Emit device code for the target simd directive.
3934 StringRef ParentName,
3935 const OMPTargetSimdDirective &S);
3936 /// Emit device code for the target teams distribute parallel for simd
3937 /// directive.
3939 CodeGenModule &CGM, StringRef ParentName,
3941
3942 /// Emit device code for the target teams loop directive.
3944 CodeGenModule &CGM, StringRef ParentName,
3946
3947 /// Emit device code for the target parallel loop directive.
3949 CodeGenModule &CGM, StringRef ParentName,
3951
3953 CodeGenModule &CGM, StringRef ParentName,
3955
3956 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
3957 /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
3958 /// future it is meant to be the number of loops expected in the loop nests
3959 /// (usually specified by the "collapse" clause) that are collapsed to a
3960 /// single loop by this function.
3961 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
3962 int Depth);
3963
3964 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
3966
3967 /// Emit inner loop of the worksharing/simd construct.
3968 ///
3969 /// \param S Directive, for which the inner loop must be emitted.
3970 /// \param RequiresCleanup true if the directive has some associated private
3971 /// variables.
3972 /// \param LoopCond Boolean condition for loop continuation.
3973 /// \param IncExpr Increment expression for loop control variable.
3974 /// \param BodyGen Generator for the inner body of the inner loop.
3975 /// \param PostIncGen Generator for post-increment code (required for ordered
3976 /// loop directives).
3978 const OMPExecutableDirective &S, bool RequiresCleanup,
3979 const Expr *LoopCond, const Expr *IncExpr,
3980 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3981 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
3982
3984 /// Emit initial code for loop counters of loop-based directives.
3986 OMPPrivateScope &LoopScope);
3987
3988 /// Helper for the OpenMP loop directives.
3990
3991 /// Emit code for the worksharing loop-based directive.
3992 /// \return true if this construct has any lastprivate clause, false
3993 /// otherwise.
3995 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3996 const CodeGenDispatchBoundsTy &CGDispatchBounds);
3997
3998 /// Emit code for the distribute loop-based directive.
4000 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4001
4002 /// Helpers for the OpenMP loop directives.
4005 const OMPLoopDirective &D,
4006 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4007
4008 /// Emits the lvalue for the expression with possibly captured variable.
4010
4011private:
4012 /// Helpers for blocks.
4013 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4014
4015 /// struct with the values to be passed to the OpenMP loop-related functions
4016 struct OMPLoopArguments {
4017 /// loop lower bound
4019 /// loop upper bound
4021 /// loop stride
4023 /// isLastIteration argument for runtime functions
4025 /// Chunk value generated by sema
4026 llvm::Value *Chunk = nullptr;
4027 /// EnsureUpperBound
4028 Expr *EUB = nullptr;
4029 /// IncrementExpression
4030 Expr *IncExpr = nullptr;
4031 /// Loop initialization
4032 Expr *Init = nullptr;
4033 /// Loop exit condition
4034 Expr *Cond = nullptr;
4035 /// Update of LB after a whole chunk has been executed
4036 Expr *NextLB = nullptr;
4037 /// Update of UB after a whole chunk has been executed
4038 Expr *NextUB = nullptr;
4039 /// Distinguish between the 'for', 'distribute', and 'sections' directives
4040 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4041 OMPLoopArguments() = default;
4042 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4043 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4044 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4045 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4046 Expr *NextUB = nullptr)
4047 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4048 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4049 NextUB(NextUB) {}
4050 };
4051 void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
4052 const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
4053 const OMPLoopArguments &LoopArgs,
4054 const CodeGenLoopTy &CodeGenLoop,
4055 const CodeGenOrderedTy &CodeGenOrdered);
4056 void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
4057 bool IsMonotonic, const OMPLoopDirective &S,
4058 OMPPrivateScope &LoopScope, bool Ordered,
4059 const OMPLoopArguments &LoopArgs,
4060 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4061 void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
4062 const OMPLoopDirective &S,
4063 OMPPrivateScope &LoopScope,
4064 const OMPLoopArguments &LoopArgs,
4065 const CodeGenLoopTy &CodeGenLoopContent);
4066 /// Emit code for sections directive.
4067 void EmitSections(const OMPExecutableDirective &S);
4068
4069public:
4070 //===--------------------------------------------------------------------===//
4071 // OpenACC Emission
4072 //===--------------------------------------------------------------------===//
4074 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4075 // simply emitting its structured block, but in the future we will implement
4076 // some sort of IR.
4077 EmitStmt(S.getStructuredBlock());
4078 }
4079
4081 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4082 // simply emitting its loop, but in the future we will implement
4083 // some sort of IR.
4084 EmitStmt(S.getLoop());
4085 }
4086
4087 //===--------------------------------------------------------------------===//
4088 // LValue Expression Emission
4089 //===--------------------------------------------------------------------===//
4090
4091 /// Create a check that a scalar RValue is non-null.
4093
4094 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
4096
4097 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
4098 /// and issue an ErrorUnsupported style diagnostic (using the
4099 /// provided Name).
4101 const char *Name);
4102
4103 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
4104 /// an ErrorUnsupported style diagnostic (using the provided Name).
4106 const char *Name);
4107
4108 /// EmitLValue - Emit code to compute a designator that specifies the location
4109 /// of the expression.
4110 ///
4111 /// This can return one of two things: a simple address or a bitfield
4112 /// reference. In either case, the LLVM Value* in the LValue structure is
4113 /// guaranteed to be an LLVM pointer type.
4114 ///
4115 /// If this returns a bitfield reference, nothing about the pointee type of
4116 /// the LLVM value is known: For example, it may not be a pointer to an
4117 /// integer.
4118 ///
4119 /// If this returns a normal address, and if the lvalue's C type is fixed
4120 /// size, this method guarantees that the returned pointer type will point to
4121 /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
4122 /// variable length type, this is not possible.
4123 ///
4125 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
4126
4127private:
4128 LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
4129
4130public:
4131 /// Same as EmitLValue but additionally we generate checking code to
4132 /// guard against undefined behavior. This is only suitable when we know
4133 /// that the address will be used to access the object.
4135
4138
4139 void EmitAtomicInit(Expr *E, LValue lvalue);
4140
4142
4145
4147 llvm::AtomicOrdering AO, bool IsVolatile = false,
4149
4150 void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
4151
4152 void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
4153 bool IsVolatile, bool isInit);
4154
4155 std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
4157 llvm::AtomicOrdering Success =
4158 llvm::AtomicOrdering::SequentiallyConsistent,
4159 llvm::AtomicOrdering Failure =
4160 llvm::AtomicOrdering::SequentiallyConsistent,
4161 bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
4162
4163 /// Emit an atomicrmw instruction, applying relevant metadata when
4164 /// applicable.
4165 llvm::AtomicRMWInst *emitAtomicRMWInst(
4166 llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
4167 llvm::AtomicOrdering Order = llvm::AtomicOrdering::SequentiallyConsistent,
4168 llvm::SyncScope::ID SSID = llvm::SyncScope::System);
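 /// Illustrative usage sketch (non-normative): a sequentially consistent
 /// fetch-add, assuming `Addr` addresses an i32 object and `Val` is an i32
 /// llvm::Value.
 /// \code
 ///   llvm::AtomicRMWInst *Old =
 ///       emitAtomicRMWInst(llvm::AtomicRMWInst::Add, Addr, Val);
 ///   // Old yields the value stored at Addr before the update.
 /// \endcode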
4169
4170 void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
4171 const llvm::function_ref<RValue(RValue)> &UpdateOp,
4172 bool IsVolatile);
4173
4174 /// EmitToMemory - Change a scalar value from its value
4175 /// representation to its in-memory representation.
4176 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
4177
4178 /// EmitFromMemory - Change a scalar value from its memory
4179 /// representation to its value representation.
4180 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
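 /// Illustrative usage sketch (non-normative): round-tripping a C/C++ bool,
 /// whose memory representation (typically i8) differs from its value
 /// representation (i1); assumes `BoolVal` is an i1 llvm::Value and `Ctx` the
 /// ASTContext.
 /// \code
 ///   llvm::Value *Mem = EmitToMemory(BoolVal, Ctx.BoolTy);   // i1 -> i8
 ///   llvm::Value *Val = EmitFromMemory(Mem, Ctx.BoolTy);     // i8 -> i1
 /// \endcode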
4181
4182 /// Check if the scalar \p Value is within the valid range for the given
4183 /// type \p Ty.
4184 ///
4185 /// Returns true if a check is needed (even if the range is unknown).
4186 bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
4188
4189 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4190 /// care to appropriately convert from the memory representation to
4191 /// the LLVM value representation.
4192 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4195 bool isNontemporal = false) {
4196 return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
4197 CGM.getTBAAAccessInfo(Ty), isNontemporal);
4198 }
4199
4200 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4202 TBAAAccessInfo TBAAInfo,
4203 bool isNontemporal = false);
4204
4205 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4206 /// care to appropriately convert from the memory representation to
4207 /// the LLVM value representation. The l-value must be a simple
4208 /// l-value.
4210
4211 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4212 /// care to appropriately convert from the memory representation to
4213 /// the LLVM value representation.
4214 void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
4215 bool Volatile, QualType Ty,
4217 bool isInit = false, bool isNontemporal = false) {
4218 EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
4219 CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
4220 }
4221
4222 void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
4223 bool Volatile, QualType Ty,
4224 LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
4225 bool isInit = false, bool isNontemporal = false);
4226
4227 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4228 /// care to appropriately convert from the memory representation to
4229 /// the LLVM value representation. The l-value must be a simple
4230 /// l-value. The isInit flag indicates whether this is an initialization.
4231 /// If so, atomic qualifiers are ignored and the store is always non-atomic.
4232 void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
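 /// Illustrative usage sketch (non-normative): initializing a simple scalar
 /// l-value, assuming `LV` is a simple LValue and `Init` an llvm::Value in the
 /// correct value representation for LV's type.
 /// \code
 ///   EmitStoreOfScalar(Init, LV, /*isInit=*/true);
 /// \endcode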
4233
4234 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
4235 /// this method emits the address of the lvalue, then loads the result as an
4236 /// rvalue, returning the rvalue.
4241
4242 /// Like EmitLoadOfLValue but also handles complex and aggregate types.
4245 SourceLocation Loc = {});
4246
4247 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
4249 /// lvalue, where both are guaranteed to have the same type, and that type
4249 /// is 'Ty'.
4250 void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
4253
4254 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same constraints
4255 /// as EmitStoreThroughLValue.
4256 ///
4257 /// \param Result [out] - If non-null, this will be set to a Value* for the
4258 /// bit-field contents after the store, appropriate for use as the result of
4259 /// an assignment to the bit-field.
4261 llvm::Value **Result=nullptr);
4262
4263 /// Emit an l-value for an assignment (simple or compound) of complex type.
4267 llvm::Value *&Result);
4268
4269 // Note: only available for agg return types
4272 // Note: only available for agg return types
4274 // Note: only available for agg return types
4282 bool Accessed = false);
4285 bool IsLowerBound = true);
4296
4298
4300
4302 LValueBaseInfo *BaseInfo = nullptr,
4303 TBAAAccessInfo *TBAAInfo = nullptr);
4304
4306 llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
4307 ConstantEmission(llvm::Constant *C, bool isReference)
4308 : ValueAndIsReference(C, isReference) {}
4309 public:
4311 static ConstantEmission forReference(llvm::Constant *C) {
4312 return ConstantEmission(C, true);
4313 }
4314 static ConstantEmission forValue(llvm::Constant *C) {
4315 return ConstantEmission(C, false);
4316 }
4317
4318 explicit operator bool() const {
4319 return ValueAndIsReference.getOpaqueValue() != nullptr;
4320 }
4321
4322 bool isReference() const { return ValueAndIsReference.getInt(); }
4324 assert(isReference());
4325 return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
4326 refExpr->getType());
4327 }
4328
4329 llvm::Constant *getValue() const {
4330 assert(!isReference());
4331 return ValueAndIsReference.getPointer();
4332 }
4333 };
4334
4337 llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
4338
4342
4344 const ObjCIvarDecl *Ivar);
4346 const ObjCIvarDecl *Ivar);
4350 llvm::Value *ThisValue);
4351
4352 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
4353 /// if the Field is a reference, this will return the address of the reference
4354 /// and not the address of the value stored in the reference.
4356 const FieldDecl* Field);
4357
4359 llvm::Value* Base, const ObjCIvarDecl *Ivar,
4360 unsigned CVRQualifiers);
4361
4366
4373
4374 //===--------------------------------------------------------------------===//
4375 // Scalar Expression Emission
4376 //===--------------------------------------------------------------------===//
4377
4378 /// EmitCall - Generate a call of the given function, expecting the given
4379 /// result type, and using the given argument list which specifies both the
4380 /// LLVM arguments and the types they were derived from.
4381 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4383 llvm::CallBase **callOrInvoke, bool IsMustTail,
4385 bool IsVirtualFunctionPointerThunk = false);
4386 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4388 llvm::CallBase **callOrInvoke = nullptr,
4389 bool IsMustTail = false) {
4390 return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
4391 IsMustTail, SourceLocation());
4392 }
4393 RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
4394 ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
4399
4400 void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
4402
4403 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4404 const Twine &name = "");
4405 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4407 const Twine &name = "");
4408 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4409 const Twine &name = "");
4410 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4411 ArrayRef<Address> args,
4412 const Twine &name = "");
4413 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4415 const Twine &name = "");
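 /// Illustrative usage sketch (non-normative): calling a runtime helper that
 /// cannot unwind. The helper's symbol name and type here are hypothetical;
 /// `PtrArg` is an assumed i8* argument value.
 /// \code
 ///   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(
 ///       llvm::FunctionType::get(VoidTy, {Int8PtrTy}, /*isVarArg=*/false),
 ///       "__example_runtime_helper");                // hypothetical symbol
 ///   llvm::CallInst *Call = EmitNounwindRuntimeCall(Fn, {PtrArg}, "helper");
 ///   (void)Call;
 /// \endcode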
4416
4418 getBundlesForFunclet(llvm::Value *Callee);
4419
4420 llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
4422 const Twine &Name = "");
4423 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4425 const Twine &name = "");
4426 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4427 const Twine &name = "");
4428 void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4430
4432 NestedNameSpecifier *Qual,
4433 llvm::Type *Ty);
4434
4437 const CXXRecordDecl *RD);
4438
4440
4441 /// Create the discriminator from the storage address and the entity hash.
4442 llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
4443 llvm::Value *Discriminator);
4445 llvm::Value *StorageAddress,
4446 GlobalDecl SchemaDecl,
4447 QualType SchemaType);
4448
4449 llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
4450 llvm::Value *Pointer);
4451
4452 llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
4453 llvm::Value *Pointer);
4454
4456 const CGPointerAuthInfo &CurAuthInfo,
4457 const CGPointerAuthInfo &NewAuthInfo,
4458 bool IsKnownNonNull);
4459 llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
4460 const CGPointerAuthInfo &CurInfo,
4461 const CGPointerAuthInfo &NewInfo);
4462
4464 const CGPointerAuthInfo &Info,
4466
4467 llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
4468 QualType SourceType, QualType DestType);
4470 QualType DestType);
4471
4473
4474 llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
4475 return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
4476 }
4477
4478 // Return the copy constructor name with the prefix "__copy_constructor_"
4479 // removed.
4481 CharUnits Alignment,
4482 bool IsVolatile,
4483 ASTContext &Ctx);
4484
4485 // Return the destructor name with the prefix "__destructor_" removed.
4487 CharUnits Alignment,
4488 bool IsVolatile,
4489 ASTContext &Ctx);
4490
4491 // These functions emit calls to the special functions of non-trivial C
4492 // structs.
4500
4501 RValue
4503 const CGCallee &Callee,
4504 ReturnValueSlot ReturnValue, llvm::Value *This,
4505 llvm::Value *ImplicitParam,
4506 QualType ImplicitParamTy, const CallExpr *E,
4507 CallArgList *RtlArgs);
4509 llvm::Value *This, QualType ThisTy,
4510 llvm::Value *ImplicitParam,
4511 QualType ImplicitParamTy, const CallExpr *E);
4515 const CXXMethodDecl *MD,
4517 bool HasQualifier,
4518 NestedNameSpecifier *Qualifier,
4519 bool IsArrow, const Expr *Base);
4520 // Compute the object pointer.
4522 llvm::Value *memberPtr,
4523 const MemberPointerType *memberPtrType,
4524 LValueBaseInfo *BaseInfo = nullptr,
4525 TBAAAccessInfo *TBAAInfo = nullptr);
4528
4530 const CXXMethodDecl *MD,
4533
4536
4539
4540 RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
4542
4543 RValue emitRotate(const CallExpr *E, bool IsRotateRight);
4544
4545 /// Emit IR for __builtin_os_log_format.
4547
4548 /// Emit IR for __builtin_is_aligned.
4550 /// Emit IR for __builtin_align_up/__builtin_align_down.
4551 RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);
4552
4555 CharUnits BufferAlignment);
4556
4558
4559 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
4560 /// is unhandled by the current target.
4561 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4563
4564 llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
4565 const llvm::CmpInst::Predicate Fp,
4566 const llvm::CmpInst::Predicate Ip,
4567 const llvm::Twine &Name = "");
4568 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4570 llvm::Triple::ArchType Arch);
4571 llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4573 llvm::Triple::ArchType Arch);
4574 llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4576 llvm::Triple::ArchType Arch);
4577 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
4578 QualType RTy);
4579 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
4580 QualType RTy);
4581
4582 llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
4583 unsigned LLVMIntrinsic,
4584 unsigned AltLLVMIntrinsic,
4585 const char *NameHint,
4586 unsigned Modifier,
4587 const CallExpr *E,
4589 Address PtrOp0, Address PtrOp1,
4590 llvm::Triple::ArchType Arch);
4591
4592 llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
4593 unsigned Modifier, llvm::Type *ArgTy,
4594 const CallExpr *E);
4595 llvm::Value *EmitNeonCall(llvm::Function *F,
4597 const char *name,
4598 unsigned shift = 0, bool rightshift = false);
4599 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
4600 const llvm::ElementCount &Count);
4601 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
4602 llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
4603 bool negateForRightShift);
4604 llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
4605 llvm::Type *Ty, bool usgn, const char *name);
4606 llvm::Value *vectorWrapScalar16(llvm::Value *Op);
4607 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
4608 /// access builtin. Only required if it can't be inferred from the base
4609 /// pointer operand.
4610 llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
4611
4613 getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
4615 llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
4616 llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
4617 llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
4618 llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
4619 llvm::Type *ReturnType,
4621 llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
4622 llvm::Type *ReturnType,
4624 llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
4625 llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
4626 llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
4627 llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
4628 llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
4630 unsigned BuiltinID);
4631 llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
4633 unsigned BuiltinID);
4634 llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
4635 llvm::ScalableVectorType *VTy);
4636 llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
4638 unsigned IntID);
4639 llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
4641 unsigned IntID);
4642 llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
4644 unsigned BuiltinID, bool IsZExtReturn);
4645 llvm::Value *EmitSVEMaskedStore(const CallExpr *,
4647 unsigned BuiltinID);
4648 llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
4650 unsigned BuiltinID);
4651 llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
4653 unsigned IntID);
4654 llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
4656 unsigned IntID);
4657 llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
4659 unsigned IntID);
4660 /// FormSVEBuiltinResult - Returns the struct of scalable vectors as a wider
4661 /// vector. It extracts each scalable vector from the struct and inserts it
4662 /// into the wider vector, avoiding the error LLVM would raise when
4663 /// allocating space for a struct of scalable vectors returned by a function.
4664 llvm::Value *FormSVEBuiltinResult(llvm::Value *Call);
4665
4666 llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4667
4668 llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
4670 unsigned IntID);
4671 llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
4673 unsigned IntID);
4674 llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
4676 unsigned IntID);
4677 llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
4679 unsigned IntID);
4680
4681 void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
4683 SVETypeFlags TypeFlags);
4684
4685 llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4686
4687 llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4688 llvm::Triple::ArchType Arch);
4689 llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4690
4692 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4693 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4694 llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4695 llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4696 llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
4697 const CallExpr *E);
4698 llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4699 llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4700 llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
4701 const CallExpr *E);
4702 llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4703 llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4705
4706 llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
4707 llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
4708 llvm::Value *EmitRISCVCpuInit();
4709
4710 void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
4711 const CallExpr *E);
4712 void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
4713 llvm::AtomicOrdering &AO,
4714 llvm::SyncScope::ID &SSID);
4715
4716 enum class MSVCIntrin;
4717 llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
4718
4719 llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);
4720
4723 llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
4726 llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
4727 const ObjCMethodDecl *MethodWithObjects);
4730 ReturnValueSlot Return = ReturnValueSlot());
4731
4732 /// Retrieves the default cleanup kind for an ARC cleanup.
4733 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
4735 return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
4737 }
4738
4739 // ARC primitives.
4740 void EmitARCInitWeak(Address addr, llvm::Value *value);
4742 llvm::Value *EmitARCLoadWeak(Address addr);
4744 llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
4745 void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4746 void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4749 llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
4750 llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
4751 llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
4752 bool resultIgnored);
4753 llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
4754 bool resultIgnored);
4755 llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
4756 llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
4757 llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
4759 void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4760 llvm::Value *EmitARCAutorelease(llvm::Value *value);
4761 llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
4762 llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
4763 llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
4764 llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
4765
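 /// Illustrative usage sketch (non-normative): a retain/release pair for an
 /// ARC-managed value, assuming `Ty` is the Objective-C pointer type and
 /// `Obj` the retainable value (lifetime enumerator as defined in CGValue.h).
 /// \code
 ///   llvm::Value *Retained = EmitARCRetain(Ty, Obj);
 ///   // ... use Retained ...
 ///   EmitARCRelease(Retained, ARCImpreciseLifetime);
 /// \endcode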
4766 llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
4767 llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
4768 llvm::Type *returnType);
4769 void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4770
4771 std::pair<LValue,llvm::Value*>
4773 std::pair<LValue,llvm::Value*>
4774 EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
4775 std::pair<LValue,llvm::Value*>
4777
4778 llvm::Value *EmitObjCAlloc(llvm::Value *value,
4779 llvm::Type *returnType);
4780 llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
4781 llvm::Type *returnType);
4782 llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
4783
4784 llvm::Value *EmitObjCThrowOperand(const Expr *expr);
4785 llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
4786 llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
4787
4788 llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
4789 llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
4790 bool allowUnsafeClaim);
4791 llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
4794
4796
4798
4804
4805 void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
4808 void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
4809 void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
4810
4811 /// Emits a reference binding to the passed in expression.
4813
4814 //===--------------------------------------------------------------------===//
4815 // Expression Emission
4816 //===--------------------------------------------------------------------===//
4817
4818 // Expressions are broken into three classes: scalar, complex, aggregate.
4819
4820 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
4821 /// scalar type, returning the result.
4822 llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
4823
4824 /// Emit a conversion from the specified type to the specified destination
4825 /// type, both of which are LLVM scalar types.
4826 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
4827 QualType DstTy, SourceLocation Loc);
4828
4829 /// Emit a conversion from the specified complex type to the specified
4830 /// destination type, where the destination type is an LLVM scalar type.
4832 QualType DstTy,
4834
4835 /// EmitAggExpr - Emit the computation of the specified expression
4836 /// of aggregate type. The result is computed into the given slot,
4837 /// which may be null to indicate that the value is not needed.
4838 void EmitAggExpr(const Expr *E, AggValueSlot AS);
4839
4840 /// EmitAggExprToLValue - Emit the computation of the specified expression of
4841 /// aggregate type into a temporary LValue.
4843
4845
4846 /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
4847 /// destination address.
4849 ExprValueKind SrcKind);
4850
4851 /// Create a store to \arg Dst from \arg Src, truncating the stored value
4852 /// to at most \arg DstSize bytes.
4853 void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
4854 bool DstIsVolatile);
4855
4856 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
4857 /// make sure it survives garbage collection until this point.
4858 void EmitExtendGCLifetime(llvm::Value *object);
4859
4860 /// EmitComplexExpr - Emit the computation of the specified expression of
4861 /// complex type, returning the result.
4863 bool IgnoreReal = false,
4864 bool IgnoreImag = false);
4865
4866 /// EmitComplexExprIntoLValue - Emit the given expression of complex
4867 /// type and place its result into the specified l-value.
4868 void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
4869
4870 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
4871 void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
4872
4873 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
4875
4877 llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
4880
4883
4884 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
4885 /// global variable that has already been created for it. If the initializer
4886 /// has a different type than GV does, this may free GV and return a different
4887 /// one. Otherwise it just returns GV.
4888 llvm::GlobalVariable *
4890 llvm::GlobalVariable *GV);
4891
4892 // Emit an @llvm.invariant.start call for the given memory region.
4893 void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
4894
4895 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
4896 /// variable with global storage.
4897 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
4898 bool PerformInit);
4899
4900 llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
4901 llvm::Constant *Addr);
4902
4903 llvm::Function *createTLSAtExitStub(const VarDecl &VD,
4904 llvm::FunctionCallee Dtor,
4905 llvm::Constant *Addr,
4906 llvm::FunctionCallee &AtExit);
4907
4908 /// Call atexit() with a function that passes the given argument to
4909 /// the given function.
4910 void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
4911 llvm::Constant *addr);
4912
4913 /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
4914 /// support an 'atexit()' function.
4915 void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
4916 llvm::Constant *addr);
4917
4918 /// Call atexit() with function dtorStub.
4919 void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
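 /// Illustrative usage sketch (non-normative): registering a destructor for a
 /// global via atexit(), assuming `VD` is the VarDecl, `DtorFn` its destructor
 /// callee, and `Addr` the global's address constant.
 /// \code
 ///   llvm::Constant *Stub = createAtExitStub(VD, DtorFn, Addr);
 ///   registerGlobalDtorWithAtExit(Stub);
 /// \endcode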
4920
4921 /// Call unatexit() with function dtorStub.
4922 llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);
4923
4924 /// Emit code in this function to perform a guarded variable
4925 /// initialization. Guarded initializations are used when it's not
4926 /// possible to prove that an initialization will be done exactly
4927 /// once, e.g. with a static local variable or a static data member
4928 /// of a class template.
4929 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
4930 bool PerformInit);
4931
4933
4934 /// Emit a branch to select whether or not to perform guarded initialization.
4935 void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
4936 llvm::BasicBlock *InitBlock,
4937 llvm::BasicBlock *NoInitBlock,
4938 GuardKind Kind, const VarDecl *D);
4939
4940 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
4941 /// variables.
4942 void
4943 GenerateCXXGlobalInitFunc(llvm::Function *Fn,
4944 ArrayRef<llvm::Function *> CXXThreadLocals,
4946
4947 /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
4948 /// variables.
4950 llvm::Function *Fn,
4951 ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
4952 llvm::Constant *>>
4953 DtorsOrStermFinalizers);
4954
4955 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
4956 const VarDecl *D,
4957 llvm::GlobalVariable *Addr,
4958 bool PerformInit);
4959
4961
4962 void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
4963
4964 void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
4965
4967
4968 //===--------------------------------------------------------------------===//
4969 // Annotations Emission
4970 //===--------------------------------------------------------------------===//
4971
4972 /// Emit an annotation call (intrinsic).
4973 llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
4974 llvm::Value *AnnotatedVal,
4975 StringRef AnnotationStr,
4976 SourceLocation Location,
4977 const AnnotateAttr *Attr);
4978
4979 /// Emit local annotations for the local variable V, declared by D.
4980 void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
4981
4982 /// Emit field annotations for the given field & value. Returns the
4983 /// annotation result.
4985
4986 //===--------------------------------------------------------------------===//
4987 // Internal Helpers
4988 //===--------------------------------------------------------------------===//
4989
4990 /// ContainsLabel - Return true if the statement contains a label in it. If
4991 /// this statement is not executed normally, then not containing a label means
4992 /// that we can just remove the code.
4993 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
4994
4995 /// containsBreak - Return true if the statement contains a break out of it.
4996 /// If the statement (recursively) contains a switch or loop with a break
4997 /// inside of it, this is fine.
4998 static bool containsBreak(const Stmt *S);
4999
5000 /// Determine if the given statement might introduce a declaration into the
5001 /// current scope, by being a (possibly-labelled) DeclStmt.
5002 static bool mightAddDeclToScope(const Stmt *S);
5003
5004 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5005 /// to a constant, or if it does but contains a label, return false. If it
5006 /// constant folds, return true and set the boolean result in Result.
5008 bool AllowLabels = false);
5009
5010 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5011 /// to a constant, or if it does but contains a label, return false. If it
5012 /// constant folds, return true and set the folded value.
5013 bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
5014 bool AllowLabels = false);
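 /// Illustrative usage sketch (non-normative): folding an `if` condition
 /// before emitting either arm, assuming `Cond` is the condition expression.
 /// \code
 ///   llvm::APSInt CondVal;
 ///   if (ConstantFoldsToSimpleInteger(Cond, CondVal)) {
 ///     // Only the arm selected by CondVal needs to be emitted (unless the
 ///     // skipped arm contains labels; see ContainsLabel above).
 ///   }
 /// \endcode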
5015
5016 /// Ignore parentheses and logical-NOT to track conditions consistently.
5017 static const Expr *stripCond(const Expr *C);
5018
5019 /// isInstrumentedCondition - Determine whether the given condition is an
5020 /// instrumentable condition (i.e. no "&&" or "||").
5021 static bool isInstrumentedCondition(const Expr *C);
5022
5023 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
5024 /// increments a profile counter based on the semantics of the given logical
5025 /// operator opcode. This is used to instrument branch condition coverage
5026 /// for logical operators.
5028 llvm::BasicBlock *TrueBlock,
5029 llvm::BasicBlock *FalseBlock,
5030 uint64_t TrueCount = 0,
5032 const Expr *CntrIdx = nullptr);
5033
5034 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
5035 /// if statement) to the specified blocks. Based on the condition, this might
5036 /// try to simplify the codegen of the conditional based on the branch.
5037 /// TrueCount should be the number of times we expect the condition to
5038 /// evaluate to true based on PGO data.
5039 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
5040 llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
5042 const Expr *ConditionalOp = nullptr);
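 /// Illustrative usage sketch (non-normative): branching on an `if`
 /// condition, assuming `Cond` is the condition expression, `ThenCount` its
 /// profile count, and that the remaining parameters take their defaults;
 /// createBasicBlock is the usual block factory on CodeGenFunction.
 /// \code
 ///   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
 ///   llvm::BasicBlock *ElseBlock = createBasicBlock("if.else");
 ///   EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, ThenCount);
 /// \endcode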
5043
5044 /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
5045 /// nonnull, if \p LHS is marked _Nonnull.
5046 void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
5047
5048 /// An enumeration which makes it easier to specify whether or not an
5049 /// operation is a subtraction.
5050 enum { NotSubtraction = false, IsSubtraction = true };
5051
5052 /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
5053 /// detect undefined behavior when the pointer overflow sanitizer is enabled.
5054 /// \p SignedIndices indicates whether any of the GEP indices are signed.
5055 /// \p IsSubtraction indicates whether the expression used to form the GEP
5056 /// is a subtraction.
5057 llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
5059 bool SignedIndices,
5060 bool IsSubtraction,
5062 const Twine &Name = "");
5063
5065 llvm::Type *elementType, bool SignedIndices,
5067 CharUnits Align, const Twine &Name = "");
5068
5069 /// Specifies which type of sanitizer check to apply when handling a
5070 /// particular builtin.
5074 };
5075
5076 /// Emits an argument for a call to a builtin. If the builtin sanitizer is
5077 /// enabled, a runtime check specified by \p Kind is also emitted.
5079
5080 /// Emit a description of a type in a format suitable for passing to
5081 /// a runtime sanitizer handler.
5083
5084 /// Convert a value into a format suitable for passing to a runtime
5085 /// sanitizer handler.
5086 llvm::Value *EmitCheckValue(llvm::Value *V);
5087
5088 /// Emit a description of a source location in a format suitable for
5089 /// passing to a runtime sanitizer handler.
5091
5094
5095 /// Create a basic block that will either trap or call a handler function in
5096 /// the UBSan runtime with the provided arguments, and create a conditional
5097 /// branch to it.
5098 void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
5100 ArrayRef<llvm::Value *> DynamicArgs);
5101
5102 /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
5103 /// if Cond is false.
5104 void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
5105 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
5106 ArrayRef<llvm::Constant *> StaticArgs);
5107
5108 /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
5109 /// checking is enabled. Otherwise, just emit an unreachable instruction.
5111
5112 /// Create a basic block that will call the trap intrinsic, and emit a
5113 /// conditional branch to it, for the -ftrapv checks.
5114 void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID);
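 /// Illustrative usage sketch (non-normative): trapping when a computed
 /// condition fails, assuming `Checked` is an i1 that is true when the
 /// operation is valid. The handler kind below is illustrative; pick the
 /// SanitizerHandler matching the check being emitted.
 /// \code
 ///   EmitTrapCheck(Checked, SanitizerHandler::SubOverflow);
 /// \endcode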
5115
5116 /// Emit a call to trap or debugtrap and attach function attribute
5117 /// "trap-func-name" if specified.
5118 llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
5119
5120 /// Emit a stub for the cross-DSO CFI check function.
5122
5123 /// Emit a cross-DSO CFI failure handling function.
5125
5126 /// Create a check for a function parameter that may potentially be
5127 /// declared as non-null.
5129 AbstractCallee AC, unsigned ParmNum);
5130
5132 SourceLocation ArgLoc, AbstractCallee AC,
5133 unsigned ParmNum);
5134
5135 /// EmitCallArg - Emit a single call argument.
5136 void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
5137
5138 /// EmitDelegateCallArg - We are performing a delegate call; that
5139 /// is, the current function is delegating to another one. Produce
5140 /// an r-value suitable for passing the given parameter.
5141 void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
5142 SourceLocation loc);
5143
5144 /// SetFPAccuracy - Set the minimum required accuracy of the given floating
5145 /// point operation, expressed as the maximum relative error in ulp.
5146 void SetFPAccuracy(llvm::Value *Val, float Accuracy);
5147
5148 /// Set the minimum required accuracy of the given sqrt operation
5149 /// based on CodeGenOpts.
5150 void SetSqrtFPAccuracy(llvm::Value *Val);
5151
5152 /// Set the minimum required accuracy of the given division operation based
5153 /// on CodeGenOpts.
5154 void SetDivFPAccuracy(llvm::Value *Val);
5155
5156 /// Set the codegen fast-math flags.
5157 void SetFastMathFlags(FPOptions FPFeatures);
5158
5159 // Truncate or extend a boolean vector to the requested number of elements.
5160 llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
5161 unsigned NumElementsDst,
5162 const llvm::Twine &Name = "");
5163 // Adds a convergence_ctrl token to |Input| and emits the required parent
5164 // convergence instructions.
5165 template <typename CallType>
5166 CallType *addControlledConvergenceToken(CallType *Input) {
5167 return cast<CallType>(
5168 addConvergenceControlToken(Input, ConvergenceTokenStack.back()));
5169 }
5170
5171private:
5172 // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
5173 // as its parent convergence instruction.
5174 llvm::IntrinsicInst *emitConvergenceLoopToken(llvm::BasicBlock *BB,
5175 llvm::Value *ParentToken);
5176 // Adds a convergence_ctrl token with |ParentToken| as parent convergence
5177 // instr to the call |Input|.
5178 llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input,
5179 llvm::Value *ParentToken);
5180 // Find the convergence_entry instruction of |F|, or emit one if none exists.
5181 // Returns the convergence instruction.
5182 llvm::IntrinsicInst *getOrEmitConvergenceEntryToken(llvm::Function *F);
5183 // Find the convergence_loop instruction for the loop defined by |LI|, or
5184 // emit one if none exists. Returns the convergence instruction.
5185 llvm::IntrinsicInst *getOrEmitConvergenceLoopToken(const LoopInfo *LI);
5186
5187 private:
5188 llvm::MDNode *getRangeForLoadFromType(QualType Ty);
5189 void EmitReturnOfRValue(RValue RV, QualType Ty);
5190
5191 void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
5192
5193 llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
5194 DeferredReplacements;
5195
5196 /// Set the address of a local variable.
5197 void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
5198 assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
5199 LocalDeclMap.insert({VD, Addr});
5200 }
5201
5202 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
5203 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
5204 ///
5205 /// \param AI - The first function argument of the expansion.
5206 void ExpandTypeFromArgs(QualType Ty, LValue Dst,
5207 llvm::Function::arg_iterator &AI);
5208
5209 /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
5210 /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
5211 /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
5212 void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
5213 SmallVectorImpl<llvm::Value *> &IRCallArgs,
5214 unsigned &IRCallArgPos);
5215
5216 std::pair<llvm::Value *, llvm::Type *>
5217 EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
5218 std::string &ConstraintStr);
5219
5220 std::pair<llvm::Value *, llvm::Type *>
5221 EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
5222 QualType InputType, std::string &ConstraintStr,
5223 SourceLocation Loc);
5224
5225 /// Attempts to statically evaluate the object size of E. If that
5226 /// fails, emits code to figure the size of E out for us. This is
5227 /// pass_object_size aware.
5228 ///
5229 /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
5230 llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
5231 llvm::IntegerType *ResType,
5232 llvm::Value *EmittedE,
5233 bool IsDynamic);
5234
5235 /// Emits the size of E, as required by __builtin_object_size. This
5236 /// function is aware of pass_object_size parameters, and will act accordingly
5237 /// if E is a parameter with the pass_object_size attribute.
5238 llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
5239 llvm::IntegerType *ResType,
5240 llvm::Value *EmittedE,
5241 bool IsDynamic);
5242
5243 llvm::Value *emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
5244 llvm::IntegerType *ResType);
5245
5246 void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
5247 Address Loc);
5248
5249 public:
5250 enum class EvaluationOrder {
5251 ///! No language constraints on evaluation order.
5252 Default,
5253 ///! Language semantics require left-to-right evaluation.
5254 ForceLeftToRight,
5255 ///! Language semantics require right-to-left evaluation.
5256 ForceRightToLeft
5257 };
5258
5259 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
5260 // an ObjCMethodDecl.
5261 struct PrototypeWrapper {
5262 llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;
5263
5264 PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
5265 PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
5266 };
5267
5268 void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
5269 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
5270 AbstractCallee AC = AbstractCallee(),
5271 unsigned ParamsToSkip = 0,
5272 EvaluationOrder Order = EvaluationOrder::Default);
5273
5274 /// EmitPointerWithAlignment - Given an expression with a pointer type,
5275 /// emit the value and compute our best estimate of the alignment of the
5276 /// pointee.
5277 ///
5278 /// \param BaseInfo - If non-null, this will be initialized with
5279 /// information about the source of the alignment and the may-alias
5280 /// attribute. Note that this function will conservatively fall back on
5281 /// the type when it doesn't recognize the expression and may-alias will
5282 /// be set to false.
5283 ///
5284 /// One reasonable way to use this information is when there's a language
5285 /// guarantee that the pointer must be aligned to some stricter value, and
5286 /// we're simply trying to ensure that sufficiently obvious uses of under-
5287 /// aligned objects don't get miscompiled; for example, a placement new
5288 /// into the address of a local variable. In such a case, it's quite
5289 /// reasonable to just ignore the returned alignment when it isn't from an
5290 /// explicit source.
5291 Address
5292 EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
5293 TBAAAccessInfo *TBAAInfo = nullptr,
5294 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
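 // Illustrative usage sketch (not part of this header), assuming a live
 // CodeGenFunction &CGF and a pointer-typed expression PtrExpr: the returned
 // Address pairs the emitted pointer with the best alignment estimate, and
 // BaseInfo records where that estimate came from.
 //   LValueBaseInfo BaseInfo;
 //   TBAAAccessInfo TBAAInfo;
 //   Address P = CGF.EmitPointerWithAlignment(PtrExpr, &BaseInfo, &TBAAInfo);
 //   CharUnits Align = P.getAlignment();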
5295
5296 /// If \p E references a parameter with pass_object_size info or a constant
5297 /// array size modifier, emit the object size divided by the size of \p EltTy.
5298 /// Otherwise return null.
5299 llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
5300
5301 void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
5302
5303 struct MultiVersionResolverOption {
5304 llvm::Function *Function;
5305 struct Conds {
5306 StringRef Architecture;
5308
5309 Conds(StringRef Arch, ArrayRef<StringRef> Feats)
5310 : Architecture(Arch), Features(Feats) {}
5311 } Conditions;
5312
5313 MultiVersionResolverOption(llvm::Function *F, StringRef Arch,
5314 ArrayRef<StringRef> Feats)
5315 : Function(F), Conditions(Arch, Feats) {}
5316 };
5317
5318 // Emits the body of a multiversion function's resolver. Assumes that the
5319 // options are already sorted in the proper order, with the 'default' option
5320 // last (if it exists).
5321 void EmitMultiVersionResolver(llvm::Function *Resolver,
5322 ArrayRef<MultiVersionResolverOption> Options);
5323 void
5324 EmitX86MultiVersionResolver(llvm::Function *Resolver,
5325 ArrayRef<MultiVersionResolverOption> Options);
5326 void
5327 EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
5328 ArrayRef<MultiVersionResolverOption> Options);
5329
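 // Illustrative usage sketch (not part of this header), assuming a live
 // CodeGenFunction &CGF, a resolver function Resolver, and per-variant
 // functions Avx2Fn and DefaultFn; note the 'default' option goes last.
 //   SmallVector<CodeGenFunction::MultiVersionResolverOption, 2> Opts;
 //   Opts.emplace_back(Avx2Fn, /*Arch=*/"", ArrayRef<StringRef>{"avx2"});
 //   Opts.emplace_back(DefaultFn, /*Arch=*/"", ArrayRef<StringRef>{});
 //   CGF.EmitMultiVersionResolver(Resolver, Opts);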
5330 private:
5331 QualType getVarArgType(const Expr *Arg);
5332
5333 void EmitDeclMetadata();
5334
5335 BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
5336 const AutoVarEmission &emission);
5337
5338 void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
5339
5340 llvm::Value *GetValueForARMHint(unsigned BuiltinID);
5341 llvm::Value *EmitX86CpuIs(const CallExpr *E);
5342 llvm::Value *EmitX86CpuIs(StringRef CPUStr);
5343 llvm::Value *EmitX86CpuSupports(const CallExpr *E);
5344 llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
5345 llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
5346 llvm::Value *EmitX86CpuInit();
5347 llvm::Value *FormX86ResolverCondition(const MultiVersionResolverOption &RO);
5348 llvm::Value *EmitAArch64CpuInit();
5349 llvm::Value *
5350 FormAArch64ResolverCondition(const MultiVersionResolverOption &RO);
5351 llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
5352 llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
5353};
5354
5355 inline DominatingLLVMValue::saved_type
5356 DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
5357 if (!needsSaving(value)) return saved_type(value, false);
5358
5359 // Otherwise, we need an alloca.
5360 auto align = CharUnits::fromQuantity(
5361 CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
5362 Address alloca =
5363 CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
5364 CGF.Builder.CreateStore(value, alloca);
5365
5366 return saved_type(alloca.emitRawPointer(CGF), true);
5367}
5368
5369 inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
5370 saved_type value) {
5371 // If the value says it wasn't saved, trust that it's still dominating.
5372 if (!value.getInt()) return value.getPointer();
5373
5374 // Otherwise, it should be an alloca instruction, as set up in save().
5375 auto alloca = cast<llvm::AllocaInst>(value.getPointer());
5376 return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
5377 alloca->getAlign());
5378}
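// Illustrative usage sketch (not part of this header), assuming a live
// CodeGenFunction &CGF and an llvm::Value *V that may not dominate the point
// where a conditional cleanup is later emitted: save() spills V to a
// temporary alloca only when necessary, and restore() reloads it.
//   DominatingLLVMValue::saved_type Saved = DominatingLLVMValue::save(CGF, V);
//   // ... conditionally-executed code, possibly in other basic blocks ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);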
5379
5380} // end namespace CodeGen
5381
5382// Map the LangOption for floating point exception behavior into
5383// the corresponding enum in the IR.
5384 llvm::fp::ExceptionBehavior
5385 ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
5386 } // end namespace clang
5387
5388#endif
Enums/classes describing ABI related information about constructors, destructors and thunks.
#define V(N, I)
Definition: ASTContext.h:3341
static bool CanThrow(Expr *E, ASTContext &Ctx)
Definition: CFG.cpp:2679
@ ForDeactivation
Definition: CGCleanup.cpp:1205
const Decl * D
enum clang::sema::@1655::IndirectLocalPathEntry::EntryKind Kind
Expr * E
unsigned OldSize
Defines the clang::Expr interface and subclasses for C++ expressions.
const CFGBlock * Block
Definition: HTMLLogger.cpp:153
#define X(type, name)
Definition: Value.h:143
llvm::MachO::Architecture Architecture
Definition: MachO.h:27
llvm::MachO::Target Target
Definition: MachO.h:51
Defines some OpenMP-specific enums and functions.
SourceRange Range
Definition: SemaObjC.cpp:758
VarDecl * Variable
Definition: SemaObjC.cpp:757
SourceLocation Loc
Definition: SemaObjC.cpp:759
const char * Data
This file defines OpenACC AST classes for statement-level constructs.
This file defines OpenMP AST classes for executable directives and clauses.
C Language Family Type Representation.
StateNode * Previous
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:187
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4175
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6926
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2674
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3566
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3110
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6629
Attr - This represents one attribute.
Definition: Attr.h:42
Represents an attribute applied to a statement.
Definition: Stmt.h:2090
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition: Expr.h:4275
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition: Expr.h:4313
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition: Expr.h:4310
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3860
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:3993
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6365
BreakStmt - This represents a break.
Definition: Stmt.h:2990
Represents a call to a CUDA kernel function.
Definition: ExprCXX.h:231
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2539
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1268
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1375
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2498
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2803
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:478
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1737
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:176
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2064
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2241
A call to an overloaded operator written using operator syntax.
Definition: ExprCXX.h:81
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2617
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
Represents a C++ temporary.
Definition: ExprCXX.h:1457
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1206
CXXTryStmt - A C++ try block, including all handlers.
Definition: StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2830
Describes the capture of either a variable, or 'this', or variable-length array type.
Definition: Stmt.h:3780
This captures a statement into a function.
Definition: Stmt.h:3767
CaseStmt - Represent a case statement.
Definition: Stmt.h:1811
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3498
const CXXBaseSpecifier *const * path_const_iterator
Definition: Expr.h:3565
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
Represents a 'co_await' expression.
Definition: ExprCXX.h:5185
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
bool hasOffset() const
Definition: Address.h:242
void setAlignment(CharUnits Value)
Definition: Address.h:191
llvm::Value * getOffset() const
Definition: Address.h:244
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:855
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:895
A pair of helper functions for a __block variable.
Information about the layout of a __block variable.
Definition: CGBlocks.h:136
CGBlockInfo - Information to generate a block literal.
Definition: CGBlocks.h:156
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:135
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
Definition: CGBuilder.h:157
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:127
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
All available information about a concrete callee.
Definition: CGCall.h:63
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
RawAddress getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
API for captured statement code generation.
static bool classof(const CGCapturedStmtInfo *)
llvm::SmallDenseMap< const VarDecl *, FieldDecl * > getCaptureFields()
Get the CaptureFields.
CGCapturedStmtInfo(CapturedRegionKind K=CR_Default)
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K=CR_Default)
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
RAII for correct setting/restoring of CapturedStmtInfo.
CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo)
CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
void Emit(CodeGenFunction &CGF, Flags flags) override
Emit the cleanup.
CallLifetimeEnd(RawAddress addr, llvm::Value *size)
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const
A scope within which we are constructing the fields of an object which might use a CXXDefaultInitExpr...
FieldConstructionScope(CodeGenFunction &CGF, Address This)
A class controlling the emission of a finally block.
void enter(CodeGenFunction &CGF, const Stmt *Finally, llvm::FunctionCallee beginCatchFn, llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn)
Enters a finally block for an implementation using zero-cost exceptions.
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:693
LexicalScope(CodeGenFunction &CGF, SourceRange Range)
Enter a new cleanup scope.
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~LexicalScope()
Exit this cleanup scope, emitting any accumulated cleanups.
RAII for preserving necessary info during inlined region body codegen.
InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &FiniBB)
void Emit(CodeGenFunction &CGF, Flags) override
Emit the cleanup.
RAII for preserving necessary info during Outlined region body codegen.
OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &RetBB)
Controls insertion of cancellation exit blocks in worksharing constructs.
OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel)
Save/restore original map of previously emitted local vars in case when we need to duplicate emission...
The class used to assign some variables some temporarily addresses.
bool apply(CodeGenFunction &CGF)
Applies new addresses to the list of the variables.
void restore(CodeGenFunction &CGF)
Restores original addresses of the variables.
bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD, Address TempAddr)
Sets the address of the variable LocalVD to be TempAddr in function CGF.
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
void restoreMap()
Restore all mapped variables w/o clean up.
bool Privatize()
Privatizes local variables previously registered as private.
bool isGlobalVarCaptured(const VarDecl *VD) const
Checks if the global variable is captured in current function.
OMPPrivateScope(CodeGenFunction &CGF)
Enter a new OpenMP private scope.
~OMPPrivateScope()
Exit scope - all the mapped variables are restored.
bool addPrivate(const VarDecl *LocalVD, Address Addr)
Registers LocalVD variable as a private with Addr as the address of the corresponding private variabl...
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?...
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, LValue lvalue)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
ParentLoopDirectiveForScanRegion(CodeGenFunction &CGF, const OMPExecutableDirective &ParentLoopDirectiveForScan)
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RunCleanupsScope(CodeGenFunction &CGF)
Enter a new cleanup scope.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
An RAII object to record that we're evaluating a statement expression.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, ObjCMethodDecl *MD, bool ctor)
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, QualType::DestructionKind dtorKind)
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn, ArrayRef< llvm::Function * > CXXThreadLocals, ConstantAddress Guard=ConstantAddress::invalid())
GenerateCXXGlobalInitFunc - Generates code for initializing global variables.
llvm::Value * EmitPointerAuthAuth(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
void EmitGotoStmt(const GotoStmt &S)
void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This, Address Src, const CXXConstructExpr *E)
void EmitDestructorBody(FunctionArgList &Args)
void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, OMPTaskDataTy &Data)
void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD)
llvm::BasicBlock * getEHDispatchBlock(EHScopeStack::stable_iterator scope)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete, llvm::Value *CompletePtr, QualType ElementType)
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, Address This, CallArgList &Args, AggValueSlot::Overlap_t Overlap, SourceLocation Loc, bool NewPointerIsChecked)
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void EmitARCDestroyWeak(Address addr)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue)
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags, bool LoadBlockVarAddr, bool CanThrow)
Enter a cleanup to destroy a __block variable.
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPAggregateAssign(Address DestAddr, Address SrcAddr, QualType OriginalType, const llvm::function_ref< void(Address, Address)> CopyGen)
Perform element by element copying of arrays with type OriginalType from SrcAddr to DestAddr using co...
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
void EmitAsanPrologueOrEpilogue(bool Prologue)
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EnterSEHTryStmt(const SEHTryStmt &S)
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl)
llvm::Value * EmitSVEPredicateCast(llvm::Value *Pred, llvm::ScalableVectorType *VTy)
Address getExceptionSlot()
Returns a pointer to the function's exception object and selector slot, which is assigned in every la...
RawAddress CreateMemTemp(QualType T, CharUnits Align, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
bool isBinaryLogicalOp(const Expr *E) const
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
void VolatilizeTryBlocks(llvm::BasicBlock *BB, llvm::SmallPtrSet< llvm::BasicBlock *, 10 > &V)
void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp, const CGFunctionInfo **ImplFnInfo, llvm::Function **ImplFn)
llvm::Function * GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF, const SEHFinallyStmt &Finally)
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
llvm::Function * GenerateSEHFilterFunction(CodeGenFunction &ParentCGF, const SEHExceptStmt &Except)
static Destroyer destroyNonTrivialCStruct
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee, const ThunkInfo *Thunk, bool IsUnprototyped)
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, const FunctionArgList &Args, SourceLocation Loc)
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
static bool cxxDestructorCanThrow(QualType T)
Check if T is a C++ class that has a destructor that can throw.
void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, const FunctionArgList &Args)
llvm::Function * GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk)
SanitizerSet SanOpts
Sanitizers enabled for this function.
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
llvm::BasicBlock * getInvokeDestImpl()
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr, const VarDecl *DestVD, const VarDecl *SrcVD, const Expr *Copy)
Emit proper copying of data from one variable to another.
void EmitIfStmt(const IfStmt &S)
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, CallArgList &CallArgs, const CGFunctionInfo *CallOpFnInfo=nullptr, llvm::Constant *CallOpFn=nullptr)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T, Address Addr)
PushDestructorCleanup - Push a cleanup to call the complete-object variant of the given destructor on...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitARCMoveWeak(Address dst, Address src)
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope, bool ForInscan=false)
Emit initial code for reduction variables.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitVTableAssumptionLoad(const VPtr &vptr, Address This)
Emit assumption that vptr load == global vtable.
void unprotectFromPeepholes(PeepholeProtection protection)
Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter, const Stmt *OutlinedStmt)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
SmallVector< Address, 1 > SEHCodeSlotStack
A stack of exception code slots.
JumpDest getJumpDestInCurrentScope(StringRef Name=StringRef())
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, const ObjCMethodDecl *GetterMothodDecl, llvm::Constant *AtomicHelperFn)
void EmitAutoVarDecl(const VarDecl &D)
EmitAutoVarDecl - Emit an auto variable declaration.
void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E, SmallVectorImpl< llvm::Value * > &Ops, SVETypeFlags TypeFlags)
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
llvm::Constant * createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr)
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static void EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDirective &S)
Emit device code for the target teams directive.
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void callCStructDefaultConstructor(LValue Dst)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
static bool hasScalarEvaluationKind(QualType T)
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
llvm::Value * EmitObjCAutoreleasePoolPush()
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx)
void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc)
EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
llvm::Value * EmitARCRetainAutoreleaseNonBlock(llvm::Value *value)
void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr)
const BlockByrefInfo & getBlockByrefInfo(const VarDecl *var)
AwaitSuspendWrapperInfo CurAwaitSuspendWrapper
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * >(CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T)
Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known to be unsigned.
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Address authPointerToPointerCast(Address Ptr, QualType SourceType, QualType DestType)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreStrong(const BinaryOperator *e, bool ignored)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk, bool IsUnprototyped)
Generate a thunk for the given method.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * EmitARCRetainAutoreleasedReturnValue(llvm::Value *value)
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
static void EmitOMPTargetTeamsDistributeDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeDirective &S)
Emit device code for the target teams distribute directive.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp)
static void EmitOMPTargetParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForSimdDirective &S)
Emit device code for the target parallel for simd directive.
llvm::Value * EmitObjCAllocWithZone(llvm::Value *value, llvm::Type *returnType)
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, llvm::Value *NumElements, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization=false)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::Value * EmitSVEGatherLoad(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, const ArrayType *ArrayTy, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization=false)
void popCatchScope()
popCatchScope - Pops the catch scope at the top of the EHScope stack, emitting any required code (oth...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
llvm::Value * EmitRISCVCpuSupports(const CallExpr *E)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
const OMPExecutableDirective * OMPParentLoopDirectiveForScan
Parent loop-based directive for scan directive.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitOMPTaskDirective(const OMPTaskDirective &S)
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitAnyExprToExn(const Expr *E, Address Addr)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
llvm::Value * EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void GenerateObjCMethod(const ObjCMethodDecl *OMD)
void EmitOMPUseDevicePtrClause(const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
llvm::Value * EmitARCAutorelease(llvm::Value *value)
llvm::Value * emitPointerAuthResignCall(llvm::Value *Pointer, const CGPointerAuthInfo &CurInfo, const CGPointerAuthInfo &NewInfo)
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
void EmitExtendGCLifetime(llvm::Value *object)
EmitExtendGCLifetime - Given a pointer to an Objective-C object, make sure it survives garbage collec...
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
void EmitOMPDistributeLoop(const OMPLoopDirective &S, const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr)
Emit code for the distribute loop-based directive.
void EmitARCNoopIntrinsicUse(ArrayRef< llvm::Value * > values)
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::Value * EmitSVEMaskedStore(const CallExpr *, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
llvm::Constant * GenerateObjCAtomicGetterCopyHelperFunction(const ObjCPropertyImplDecl *PID)
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S)
void callCStructMoveConstructor(LValue Dst, LValue Src)
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty)
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF, llvm::Value *ParentFP, llvm::Value *EntryEBP)
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * getEHResumeBlock(bool isCleanup)
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetDirective &S)
Emit device code for the target directive.
void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr, QualType DeleteTy, llvm::Value *NumElements=nullptr, CharUnits CookieSize=CharUnits())
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void callCStructCopyConstructor(LValue Dst, LValue Src)
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitSEHExceptionInfo()
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
llvm::Value * EmitARCLoadWeakRetained(Address addr)
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S)
Emit device code for the target simd directive.
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope, llvm::AtomicOrdering &AO, llvm::SyncScope::ID &SSID)
void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S, OMPPrivateScope &LoopScope)
Emit initial code for loop counters of loop-based directives.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
llvm::Value * EmitARCRetainAutorelease(QualType type, llvm::Value *value)
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
RValue EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method, const CGCallee &Callee, ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E, CallArgList *RtlArgs)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D, bool NoFinals, llvm::Value *IsLastIterCond=nullptr)
Emit final copying of lastprivate values to original variables at the end of the worksharing or simd ...
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
llvm::Function * generateAwaitSuspendWrapper(Twine const &CoroName, Twine const &SuspendPointName, CoroutineSuspendExpr const &S)
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
SmallVector< llvm::Value *, 8 > ObjCEHValueStack
ObjCEHValueStack - Stack of Objective-C exception values, used for rethrows.
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
void EmitFunctionBody(const Stmt *Body)
VlaSizePair getVLAElements1D(QualType vla)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
llvm::Value * EmitSVETupleCreate(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType, ArrayRef< llvm::Value * > Ops)
const CodeGen::CGBlockInfo * BlockInfo
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitAggregateCopyCtor(LValue Dest, LValue Src, AggValueSlot::Overlap_t MayOverlap)
llvm::Value * EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable, llvm::Type *VTableTy, uint64_t VTableByteOffset)
Emit a type checked load from the given vtable.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
llvm::AllocaInst * EHSelectorSlot
The selector slot.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, const Twine &name="")
void emitByrefStructureInit(const AutoVarEmission &emission)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
llvm::Value * EmitObjCRetainNonBlock(llvm::Value *value, llvm::Type *returnType)
llvm::Value * GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, bool Delegating)
GetVTTParameter - Return the VTT parameter that should be passed to a base constructor/destructor wit...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
llvm::Value * EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType)
Address EmitExtVectorElementLValue(LValue V)
void EmitOMPSimdFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
@ TCK_Upcast
Checking the operand of a cast to a base object.
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
llvm::Value * EmitSMELdrStr(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitContinueStmt(const ContinueStmt &S)
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
llvm::BasicBlock * getTerminateFunclet()
getTerminateFunclet - Return a cleanup funclet that just calls terminate.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Value * EmitARCStoreStrongCall(Address addr, llvm::Value *value, bool resultIgnored)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isInit=false, bool isNontemporal=false)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitCallExprLValue(const CallExpr *E)
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Value * FormSVEBuiltinResult(llvm::Value *Call)
FormSVEBuiltinResult - Returns the struct of scalable vectors as a wider vector.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx, const llvm::ElementCount &Count)
VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass)
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
llvm::Type * ConvertTypeForMem(QualType T)
llvm::Function * createTLSAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr, llvm::FunctionCallee &AtExit)
Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *elementType, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align, const Twine &Name="")
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
llvm::Value * EmitARCUnsafeUnretainedScalarExpr(const Expr *expr)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitAutoVarInit(const AutoVarEmission &emission)
llvm::BasicBlock * getUnreachableBlock()
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy)
Emit an aggregate assignment.
void GenerateOpenMPCapturedVars(const CapturedStmt &S, SmallVectorImpl< llvm::Value * > &CapturedVars)
void EmitNonNullArgCheck(Address Addr, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
bool isPointerKnownNonNull(const Expr *E)
RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align, const Twine &Name="tmp")
llvm::Value * EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID, bool IsZExtReturn)
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
llvm::SmallVector< const JumpDest *, 2 > SEHTryEpilogueStack
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
DominatingValue< T >::saved_type saveValueInCond(T value)
const llvm::function_ref< void(CodeGenFunction &, llvm::Function *, const OMPTaskDataTy &)> TaskGenTy
static void EmitOMPTargetTeamsGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsGenericLoopDirective &S)
Emit device code for the target teams loop directive.
llvm::Value * ExceptionSlot
The exception slot.
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
llvm::Value * EmitARCRetainBlock(llvm::Value *value, bool mandatory)
QualType TypeOfSelfObject()
TypeOfSelfObject - Return type of object that this self represents.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
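A minimal sketch (a hypothetical helper, assuming a live CodeGenFunction) of the usual pairing: emit the l-value with the suitability check for the given TypeCheckKind, then load through it.

    #include "CodeGenFunction.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; E is an arbitrary glvalue expression.
    static RValue emitCheckedLoad(CodeGenFunction &CGF, const Expr *E) {
      // Emits the sanitizer checks appropriate for a load (suitably sized,
      // aligned, non-null) before forming the l-value.
      LValue LV = CGF.EmitCheckedLValue(E, CodeGenFunction::TCK_Load);
      // Loads the value through the checked l-value.
      return CGF.EmitLoadOfLValue(LV, E->getExprLoc());
    }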
llvm::Value * EmitSVEDupX(llvm::Value *Scalar)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ Default
No language constraints on evaluation order.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
void EmitAttributedStmt(const AttributedStmt &S)
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
llvm::BasicBlock * OMPBeforeScanBlock
void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn, llvm::Constant *addr)
Registers the dtor using 'llvm.global_dtors' for platforms that do not support an 'atexit()' function...
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
llvm::SmallPtrSet< const CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType, llvm::Type *ElementTy, Address NewPtr, llvm::Value *NumElements, llvm::Value *AllocSizeWithoutCookie)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
llvm::Value * EmitPointerAuthSign(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A)
void EmitAtomicInit(Expr *E, LValue lvalue)
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
static std::string getNonTrivialDestructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx)
llvm::DenseMap< const Decl *, Address > DeclMapTy
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForDirective &S)
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy, SourceLocation Loc)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void EmitOMPInnerLoop(const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, const Expr *IncExpr, const llvm::function_ref< void(CodeGenFunction &)> BodyGen, const llvm::function_ref< void(CodeGenFunction &)> PostIncGen)
Emit inner loop of the worksharing/simd construct.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Value * EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress, llvm::Value *Discriminator)
Create the discriminator from the storage address and the entity hash.
llvm::Constant * GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo)
llvm::Value * vectorWrapScalar16(llvm::Value *Op)
llvm::Function * LookupNeonLLVMIntrinsic(unsigned IntrinsicID, unsigned Modifier, llvm::Type *ArgTy, const CallExpr *E)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
void EmitLabelStmt(const LabelStmt &S)
void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Value *Chain=nullptr)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
llvm::Value * EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
llvm::Value * EmitSEHExceptionCode()
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Value * EmitObjCCollectionLiteral(const Expr *E, const ObjCMethodDecl *MethodWithObjects)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void GenerateCXXGlobalCleanUpFunc(llvm::Function *Fn, ArrayRef< std::tuple< llvm::FunctionType *, llvm::WeakTrackingVH, llvm::Constant * > > DtorsOrStermFinalizers)
GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global variables.
void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)
Emit code in this function to perform a guarded variable initialization.
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV, bool PerformInit)
EmitCXXGlobalVarDeclInit - Create the initializer for a C++ variable with global storage.
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
llvm::Value * EmitObjCThrowOperand(const Expr *expr)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
llvm::Value * emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType, const CGPointerAuthInfo &CurAuthInfo, const CGPointerAuthInfo &NewAuthInfo, bool IsKnownNonNull)
void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc)
EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for RD using llvm....
void EmitOMPSingleDirective(const OMPSingleDirective &S)
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType)
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
void initFullExprCleanupWithFlag(RawAddress ActiveFlag)
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue)
llvm::Value * EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
llvm::CanonicalLoopInfo * EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth)
Emit the Stmt S and return its topmost canonical loop, if any.
llvm::Value * EmitRISCVCpuSupports(ArrayRef< StringRef > FeaturesStrs)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
llvm::Value * LoadObjCSelf()
LoadObjCSelf - Load the value of self.
bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO, bool IsVolatile, bool isInit)
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
llvm::Value * EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType, ArrayRef< llvm::Value * > Ops)
void EmitARCCopyWeak(Address dst, Address src)
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc)
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void PushDestructorCleanup(QualType T, Address Addr)
PushDestructorCleanup - Push a cleanup to call the complete-object destructor of an object of the giv...
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, AggValueSlot ThisAVS, const CXXConstructExpr *E)
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD, CXXDtorType Type, const CXXRecordDecl *RD)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
llvm::Value * EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty)
llvm::Value * EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
llvm::Value * EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty, const llvm::CmpInst::Predicate Fp, const llvm::CmpInst::Predicate Ip, const llvm::Twine &Name="")
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum, llvm::Value *ptr)
void defaultInitNonTrivialCStructVar(LValue Dst)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
llvm::ScalableVectorType * getSVEType(const SVETypeFlags &TypeFlags)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
void EmitSwitchStmt(const SwitchStmt &S)
LValue EmitLValueForLambdaField(const FieldDecl *Field, llvm::Value *ThisValue)
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF, Address ParentVar, llvm::Value *ParentFP)
Recovers the address of a local in a parent function.
const FieldDecl * FindFlexibleArrayMemberFieldAndOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FAMDecl, uint64_t &Offset)
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn, llvm::Constant *addr)
Call atexit() with a function that passes the given argument to the given function.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
emitBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
Address EmitVAListRef(const Expr *E)
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info, bool followForward, const llvm::Twine &name)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
llvm::Value * EmitLoadOfScalar(LValue lvalue, SourceLocation Loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above, except that the pointer is known to be unsigned (not signed with pointer authentication).
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
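A minimal sketch (a hypothetical helper, assuming a live CodeGenFunction) distinguishing the two aggregate-copy entry points listed here: EmitAggregateAssign for plain assignment, and EmitAggregateCopy when the caller also states the overlap relationship explicitly.

    #include "CodeGenFunction.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; Dest and Src are l-values of the same aggregate type Ty.
    static void copyAggregate(CodeGenFunction &CGF, LValue Dest, LValue Src,
                              QualType Ty, bool IsAssignment) {
      if (IsAssignment) {
        // Plain aggregate assignment; clang chooses the overlap handling.
        CGF.EmitAggregateAssign(Dest, Src, Ty);
      } else {
        // Explicit copy; the MayOverlap choice here is illustrative only.
        CGF.EmitAggregateCopy(Dest, Src, Ty, AggValueSlot::MayOverlap,
                              /*isVolatile=*/false);
      }
    }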
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
void EmitSEHTryStmt(const SEHTryStmt &S)
void maybeCreateMCDCCondBitmap()
Allocate a temp value on the stack that MCDC can use to track condition results.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
llvm::Value * EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty, bool negateForRightShift)
void ExitSEHTryStmt(const SEHTryStmt &S)
llvm::Constant * GenerateCopyHelperFunction(const CGBlockInfo &blockInfo)
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind)
Emit final update of reduction values to original variables at the end of the directive.
llvm::Value * unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub)
Call unatexit() with function dtorStub.
SmallVector< llvm::IntrinsicInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
llvm::BasicBlock * OMPScanDispatch
llvm::BasicBlock * getTerminateLandingPad()
getTerminateLandingPad - Return a landing pad that just calls terminate.
llvm::BasicBlock * getTerminateHandler()
getTerminateHandler - Return a handler (not a landing pad, just a catch handler) that just calls term...
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Emit an atomicrmw instruction, applying relevant metadata when applicable.
void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr)
llvm::Value * EmitSVEMovl(const SVETypeFlags &TypeFlags, llvm::ArrayRef< llvm::Value * > Ops, unsigned BuiltinID)
llvm::function_ref< std::pair< LValue, LValue >(CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke=nullptr, bool IsMustTail=false)
llvm::Value * EmitARCRetainAutoreleaseScalarExpr(const Expr *expr)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
void setBeforeOutermostConditional(llvm::Value *value, Address addr, CodeGenFunction &CGF)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase, bool Delegating, CallArgList &Args)
Emit a call to an inheriting constructor (that is, one that invokes a constructor inherited from a ba...
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
llvm::Type * getEltType(const SVETypeFlags &TypeFlags)
CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false)
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, OMPTargetDataInfo &InputInfo)
void EmitDeclStmt(const DeclStmt &S)
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock=false)
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor)
llvm::Function * GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info, const DeclMapTy &ldm, bool IsLambdaConversionToBlock, bool BuildGlobalBlock)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
static void EmitOMPTargetParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForDirective &S)
Emit device code for the target parallel for directive.
llvm::Value * EmitSVEPMull(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy, QualType RTy)
void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
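A minimal sketch (a hypothetical helper, assuming a live CodeGenFunction) of the scalar load/store pair: load through one l-value and store through another, letting the two calls handle the value-vs-memory representation conversions.

    #include "CodeGenFunction.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; Src and Dst are scalar l-values of compatible type.
    static void copyScalar(CodeGenFunction &CGF, LValue Src, LValue Dst,
                           SourceLocation Loc) {
      // Converts from the in-memory representation to the value representation.
      llvm::Value *V = CGF.EmitLoadOfScalar(Src, Loc);
      // Converts back and writes through the destination l-value.
      CGF.EmitStoreOfScalar(V, Dst, /*isInit=*/false);
    }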
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind, RawAddress ActiveFlag, As... A)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub)
Call atexit() with function dtorStub.
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitOMPSimdInit(const OMPLoopDirective &D)
Helpers for the OpenMP loop directives.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E)
llvm::Value * EmitSVEScatterStore(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
void EmitConstructorBody(FunctionArgList &Args)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
int ExpectedOMPLoopDepth
Number of nested loops to be consumed by the last surrounding loop-associated directive.
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
llvm::Value * EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD, NestedNameSpecifier *Qual, llvm::Type *Ty)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This)
Emit assumption load for all bases.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::Constant * GenerateObjCAtomicSetterCopyHelperFunction(const ObjCPropertyImplDecl *PID)
ComplexPairTy EmitUnPromotedValue(ComplexPairTy result, QualType PromotionType)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeSimdDirective &S)
Emit device code for the target teams distribute simd directive.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD)
SmallVector< llvm::CanonicalLoopInfo *, 4 > OMPLoopNestStack
List of recently emitted OMPCanonicalLoops.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB, const CodeGenLoopBoundsTy &CodeGenLoopBounds, const CodeGenDispatchBoundsTy &CGDispatchBounds)
Emit code for the worksharing loop-based directive.
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
llvm::Value * LoadCXXVTT()
LoadCXXVTT - Load the VTT parameter to base constructors/destructors of classes with virtual bases.
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void EmitOMPLinearClause(const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope)
Emit initial code for linear clauses.
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
void StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo, bool IsUnprototyped)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr)
LValue EmitMemberExpr(const MemberExpr *E)
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a complete class to the given direct b...
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
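A minimal sketch (hypothetical, assuming a live CodeGenFunction) of the usual pattern around this helper: try to fold the condition to a known boolean first, and only fall back to EvaluateExprAsBool when folding fails.

    #include "CodeGenFunction.h"
    #include "llvm/IR/Constants.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; Cond is a condition expression convertible to bool.
    static llvm::Value *emitCondition(CodeGenFunction &CGF, const Expr *Cond) {
      bool Folded;
      if (CGF.ConstantFoldsToSimpleInteger(Cond, Folded))
        // Known at compile time: materialize the folded value directly.
        return llvm::ConstantInt::get(
            llvm::Type::getInt1Ty(CGF.getLLVMContext()), Folded);
      // Otherwise perform the usual conversions and emit an i1 at runtime.
      return CGF.EvaluateExprAsBool(Cond);
    }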
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
llvm::Value * SEHInfo
Value returned by __exception_info intrinsic.
llvm::Value * BuildVector(ArrayRef< llvm::Value * > Ops)
ConstantEmission tryEmitAsConstant(const MemberExpr *ME)
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
void EmitAutoVarCleanups(const AutoVarEmission &emission)
llvm::GlobalVariable * AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV)
AddInitializerToStaticVarDecl - Add the initializer for 'D' to the global variable that has already b...
llvm::Value * EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void EmitOMPTileDirective(const OMPTileDirective &S)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
JumpDest getJumpDestForLabel(const LabelDecl *S)
getJumpDestForLabel - Return the jump destination (LLVM basic block) that the specified label maps to.
bool EmitOMPLinearClauseInit(const OMPLoopDirective &D)
Emit initial code for linear variables.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
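A minimal sketch (a hypothetical helper, assuming a live CodeGenFunction) of how the destruction-kind queries combine with pushDestroy: ask the type whether it needs non-trivial destruction and, if so, register the corresponding cleanup.

    #include "CodeGenFunction.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; Addr holds a fully-initialized object of type Ty.
    static void pushDestroyIfNeeded(CodeGenFunction &CGF, Address Addr,
                                    QualType Ty) {
      if (QualType::DestructionKind DK = Ty.isDestructedType())
        // This overload derives the CleanupKind (normal and/or EH, cf.
        // needsEHCleanup and getCleanupKind) and the Destroyer from DK.
        CGF.pushDestroy(DK, Addr, Ty);
    }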
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
llvm::BasicBlock * EmitLandingPad()
Emits a landing pad for the current EH stack.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, llvm::Value *VTable, SourceLocation Loc)
If whole-program virtual table optimization is enabled, emit an assumption that VTable is a member of...
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
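A minimal sketch (a hypothetical helper, assuming a live CodeGenFunction and a runtime FunctionCallee obtained elsewhere) contrasting the call-emission helpers: EmitNounwindRuntimeCall for calls known not to throw, EmitRuntimeCallOrInvoke when the call must participate in the enclosing EH scopes.

    #include "CodeGenFunction.h"
    using namespace clang::CodeGen;

    // Sketch only; Fn is a runtime function created by module-level helpers
    // that are not shown here.
    static void emitRuntimeHelperCall(CodeGenFunction &CGF,
                                      llvm::FunctionCallee Fn,
                                      llvm::ArrayRef<llvm::Value *> Args,
                                      bool MayThrow) {
      if (MayThrow)
        // Emits a call or an invoke depending on whether an EH edge is needed.
        CGF.EmitRuntimeCallOrInvoke(Fn, Args);
      else
        // Plain call marked nounwind.
        CGF.EmitNounwindRuntimeCall(Fn, Args);
    }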
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit)
Helper for the OpenMP loop directives.
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
VlaSizePair getVLASize(QualType vla)
llvm::Value * EmitObjCMRRAutoreleasePoolPush()
void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size)
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Update the MCDC test vector bitmap for the given expression using the MCDC temp value.
void EmitOMPLinearClauseFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
Emit final code for linear clauses.
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
llvm::BasicBlock * OMPScanExitBlock
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Emit device code for the target teams distribute parallel for simd directive.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr, RawAddress *Alloca=nullptr)
void EmitOMPUseDeviceAddrClause(const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
void generateObjCSetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, llvm::Constant *AtomicHelperFn)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
llvm::Value * EmitSMEReadWrite(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::Value * EmitSMELd1St1(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
ActivateCleanupBlock - Activates an initially-inactive cleanup.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, size_t OldLifetimeExtendedStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added,...
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D, llvm::GlobalVariable *Addr, bool PerformInit)
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
LValue EmitStringLiteralLValue(const StringLiteral *E)
void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt, bool IsFilter)
Scan the outlined statement for captures from the parent function.
static Destroyer destroyARCStrongPrecise
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type, FunctionArgList &Args)
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc, llvm::AtomicOrdering AO, bool IsVolatile=false, AggValueSlot slot=AggValueSlot::ignored())
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitSVEStructLoad(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD, CallArgList &CallArgs)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
LValue EmitOMPSharedLValue(const Expr *E)
Emits the lvalue for the expression with possibly captured variable.
void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address)
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data, CodeGenFunction &CGF, const CapturedStmt *CS, OMPPrivateScope &Scope)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
void pushStackRestore(CleanupKind kind, Address SPMem)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
llvm::Value * EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt, llvm::Type *Ty, bool usgn, const char *name)
void GenerateObjCSetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID)
GenerateObjCSetter - Synthesize an Objective-C property setter function for the given property.
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
bool EmitOMPCopyinClause(const OMPExecutableDirective &D)
Emit code for copyin clause in D directive.
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
SmallVector< llvm::Type *, 2 > getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType, ArrayRef< llvm::Value * > Ops)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPPrivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::Function * GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, SourceLocation Loc)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isNontemporal=false)
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr)
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
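A minimal sketch (a hypothetical helper, assuming a live CodeGenFunction) pairing the complex load and store, mirroring the scalar case above.

    #include "CodeGenFunction.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; Src and Dst are l-values of the same complex type.
    static void copyComplex(CodeGenFunction &CGF, LValue Src, LValue Dst,
                            SourceLocation Loc) {
      // The result is the (real, imaginary) pair of llvm::Value*s.
      auto V = CGF.EmitLoadOfComplex(Src, Loc);
      CGF.EmitStoreOfComplex(V, Dst, /*isInit=*/false);
    }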
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
static void EmitOMPTargetParallelGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelGenericLoopDirective &S)
Emit device code for the target parallel loop directive.
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
llvm::Value * EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void GenerateObjCGetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID)
GenerateObjCGetter - Synthesize an Objective-C property getter function.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
const CGFunctionInfo * CurFnInfo
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
llvm::Value * EmitSVEStructStore(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::BasicBlock * getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope)
void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr)
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
void pushKmpcAllocFree(CleanupKind Kind, std::pair< llvm::Value *, llvm::Value * > AddrSizePair)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
llvm::Value * EmitSEHAbnormalTermination()
void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
void EmitCoreturnStmt(const CoreturnStmt &S)
void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type)
EnterDtorCleanups - Enter the cleanups necessary to complete the given phase of destruction for a des...
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
llvm::BasicBlock * OMPAfterScanBlock
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
void EmitOMPErrorDirective(const OMPErrorDirective &S)
static Destroyer destroyARCStrongImprecise
void EmitOMPSectionDirective(const OMPSectionDirective &S)
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
static void EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelDirective &S)
llvm::Value * EmitSVEAllTruePred(const SVETypeFlags &TypeFlags)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot())
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue)
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock=false)
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
llvm::Value * EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
llvm::Instruction * CurrentFuncletPad
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
llvm::Type * SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags)
SVEBuiltinMemEltTy - Returns the memory element type for this memory access builtin.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
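A minimal sketch (a hypothetical helper, assuming a live CodeGenFunction) of how the insert-point queries are commonly used around statement emission: skip work when the current block is already terminated rather than creating a dead block.

    #include "CodeGenFunction.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; S is the statement currently being emitted.
    static void emitStopPointIfReachable(CodeGenFunction &CGF, const Stmt &S) {
      // After a noreturn call or an explicit terminator there is no insert
      // point; EnsureInsertPoint() would instead create a fresh block to
      // emit into.
      if (!CGF.HaveInsertPoint())
        return;
      CGF.EmitStopPoint(&S);
    }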
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst, const CallExpr *E)
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
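A minimal sketch (hypothetical, assuming a live CodeGenFunction; the exact placement of these calls varies per statement kind) of the profile-count plumbing around a counted region: bump the instrumentation counter, then record the known execution count as the current count used by later heuristics such as branch weights.

    #include "CodeGenFunction.h"
    using namespace clang;
    using namespace clang::CodeGen;

    // Sketch only; S is a statement that owns a profile counter.
    static void noteRegionEntered(CodeGenFunction &CGF, const Stmt *S) {
      // Emits the counter increment when Clang instrumentation is enabled;
      // otherwise this is bookkeeping only.
      CGF.incrementProfileCounter(S);
      // Make the count recorded for S the profiler's current count.
      CGF.setCurrentProfileCount(CGF.getProfileCount(S));
    }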
void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo)
Emits the alloca and debug information for the size expressions for each dimension of an array.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
llvm::SmallVector< VPtr, 4 > VPtrsVector
llvm::Value * EmitSMEZero(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::Value * getSelectorFromSlot()
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
Emit initial code for lastprivate variables.
static std::string getNonTrivialCopyConstructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx)
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl)
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
llvm::Value * EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, const char *NameHint, unsigned Modifier, const CallExpr *E, SmallVectorImpl< llvm::Value * > &Ops, Address PtrOp0, Address PtrOp1, llvm::Triple::ArchType Arch)
void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, bool BaseIsNonVirtualPrimaryBase, const CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs)
llvm::function_ref< void(CodeGenFunction &, const OMPLoopDirective &, JumpDest)> CodeGenLoopTy
void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags, bool CanThrow)
llvm::Value * EmitNeonCall(llvm::Function *F, SmallVectorImpl< llvm::Value * > &O, const char *name, unsigned shift=0, bool rightshift=false)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void InitializeVTablePointer(const VPtr &vptr)
Initialize the vtable pointer of the given subobject.
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
Address emitAddrOfRealComponent(Address complex, QualType complexType)
RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, const Expr *Base)
void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
llvm::ScalableVectorType * getSVEPredType(const SVETypeFlags &TypeFlags)
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * getExceptionFromSlot()
Returns the contents of the function's exception object and selector slots.
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
llvm::Value * EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< Address > args, const Twine &name="")
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
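A minimal, hypothetical wrapper (storeScalar is an illustrative name) built on the signature above might look like:
// Sketch: store a previously computed scalar V into Addr as a non-volatile,
// non-initializing store of AST type Ty, using the default alignment source.
void storeScalar(clang::CodeGen::CodeGenFunction &CGF, llvm::Value *V,
                 clang::CodeGen::Address Addr, clang::QualType Ty) {
  CGF.EmitStoreOfScalar(V, Addr, /*Volatile=*/false, Ty);
}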
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
RValue EmitAtomicExpr(AtomicExpr *E)
CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema, llvm::Value *StorageAddress, GlobalDecl SchemaDecl, QualType SchemaType)
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit, llvm::BasicBlock *InitBlock, llvm::BasicBlock *NoInitBlock, GuardKind Kind, const VarDecl *D)
Emit a branch to select whether or not to perform guarded initialization.
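The C++ source pattern that typically requires such a guarded-initialization branch is a function-local static with a dynamic initializer, for example (illustrative only):
// The first caller to reach 'cached' runs expensive(); subsequent calls
// branch around the initializer based on the guard variable.
int expensive();
int &getCached() {
  static int cached = expensive();
  return cached;
}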
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
CallType * addControlledConvergenceToken(CallType *Input)
LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy, AlignmentSource Source=AlignmentSource::Type)
std::pair< bool, RValue > EmitOMPAtomicSimpleUpdateExpr(LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, llvm::AtomicOrdering AO, SourceLocation Loc, const llvm::function_ref< RValue(RValue)> CommonGen)
Emit atomic update code for constructs: X = X BO E or X = E BO X.
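At the source level this corresponds to an OpenMP atomic update, for example (illustrative snippet):
void bump(int *x, int e) {
#pragma omp atomic update
  *x = *x + e; // the 'X = X BO E' form; 'X = E BO X' (e.g. *x = e + *x) also qualifies
}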
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr, llvm::FunctionCallee Callee)
Emit a musttail call for a thunk with a potentially adjusted this pointer.
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
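For example, in the following illustrative member function the cast operand is still recognized as the C++ 'this' pointer because the cast is value-preserving:
struct S {
  int n;
  void f() {
    // static_cast to a const-qualified pointer merely wraps 'this' without changing its value.
    int copy = static_cast<const S *>(this)->n;
    (void)copy;
  }
};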
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
llvm::Type * ConvertType(const TypeDecl *T)
This class organizes the cross-function state that is used while generating LLVM code.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
const llvm::DataLayout & getDataLayout() const
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
static ConstantAddress invalid()
Definition: Address.h:302
DominatingValue< Address >::saved_type AggregateAddr
static saved_type save(CodeGenFunction &CGF, RValue value)
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:141
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:203
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Definition: EHScopeStack.h:94
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
void pushCleanupTuple(CleanupKind Kind, std::tuple< As... > A)
Push a lazily-created cleanup on the stack. Tuple version.
Definition: EHScopeStack.h:295
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:368
LValue - This represents an lvalue.
Definition: CGValue.h:182
CharUnits getAlignment() const
Definition: CGValue.h:343
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
QualType getType() const
Definition: CGValue.h:291
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
A stack of loop information corresponding to loop nesting levels.
Definition: CGLoopInfo.h:204
Information used when generating a structured loop.
Definition: CGLoopInfo.h:90
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
An abstract representation of an aligned address.
Definition: Address.h:42
static RawAddress invalid()
Definition: Address.h:61
bool isValid() const
Definition: Address.h:62
This class provides a way to call the simple version of codegen for an OpenMP region, or an advanced one with possibl...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:372
TargetCodeGenInfo - This class organizes various target-specific code generation issues,...
Definition: TargetInfo.h:47
This class detects jumps which bypass local variable declarations: goto L; int a; L:
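A minimal C/C++ example of such a bypassing jump, matching the pattern quoted above:
void g(bool skip) {
  if (skip)
    goto L; // this jump bypasses the declaration of 'a'
  int a;    // 'a' is still in scope at L:, but its declaration was skipped
  a = 0;
L:
  return;
}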
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4122
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3428
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1611
ContinueStmt - This represents a continue.
Definition: Stmt.h:2960
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition: StmtCXX.h:473
Represents the body of a coroutine.
Definition: StmtCXX.h:320
Represents an expression that might suspend coroutine execution; either a co_await or co_yield expres...
Definition: ExprCXX.h:5071
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5266
Represents the current source location and context used to determine the value of the source location...
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2370
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1502
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2735
This represents one expression.
Definition: Expr.h:110
QualType getType() const
Definition: Expr.h:142
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6305
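Illustrative source using Clang's ext_vector_type extension, where member-style element access produces an ExtVectorElementExpr:
typedef float float4 __attribute__((ext_vector_type(4)));

float firstTwoSum(float4 v) {
  // v.x and v.y select single elements; v.xy would select a two-element subvector.
  return v.x + v.y;
}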
Represents a member of a struct/union/class.
Definition: Decl.h:3030
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2791
Represents a function declaration or definition.
Definition: Decl.h:1932
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5002
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2872
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2148
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2911
Describes a C or C++ initializer list.
Definition: Expr.h:5039
Represents the declaration of a label.
Definition: Decl.h:499
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2041
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:276
Keeps track of the various options that can be enabled, which control the dialect of C or C++ that i...
Definition: LangOptions.h:476
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4728
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2752
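Illustrative source using Clang's matrix_type extension (compiled with -fenable-matrix), where the double subscript is a MatrixSubscriptExpr:
typedef float m4x4_t __attribute__((matrix_type(4, 4)));

float topLeft(m4x4_t m) {
  // m[0][0] indexes row 0, column 0 of the matrix value.
  return m[0][0];
}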
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3187
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3508
Represents a C++ nested name specifier, such as "::std::vector<int>::".
This represents '#pragma omp atomic' directive.
Definition: StmtOpenMP.h:2947
This represents '#pragma omp barrier' directive.
Definition: StmtOpenMP.h:2625
This represents '#pragma omp cancel' directive.
Definition: StmtOpenMP.h:3655
This represents '#pragma omp cancellation point' directive.
Definition: StmtOpenMP.h:3597
Representation of an OpenMP canonical loop.
Definition: StmtOpenMP.h:142
This represents '#pragma omp critical' directive.
Definition: StmtOpenMP.h:2076
This represents '#pragma omp depobj' directive.
Definition: StmtOpenMP.h:2841
This represents '#pragma omp distribute' directive.
Definition: StmtOpenMP.h:4425
This represents '#pragma omp distribute parallel for' composite directive.
Definition: StmtOpenMP.h:4547
This represents '#pragma omp distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:4643
This represents '#pragma omp distribute simd' composite directive.
Definition: StmtOpenMP.h:4708
This represents '#pragma omp error' directive.
Definition: StmtOpenMP.h:6432
This is a basic class for representing a single OpenMP executable directive.
Definition: StmtOpenMP.h:266
This represents '#pragma omp flush' directive.
Definition: StmtOpenMP.h:2789
This represents '#pragma omp for' directive.
Definition: StmtOpenMP.h:1634
This represents '#pragma omp for simd' directive.
Definition: StmtOpenMP.h:1724
This represents '#pragma omp loop' directive.
Definition: StmtOpenMP.h:6103
Represents the '#pragma omp interchange' loop transformation directive.
Definition: StmtOpenMP.h:5769
This represents '#pragma omp interop' directive.
Definition: StmtOpenMP.h:5895
This is a common base class for loop directives ('omp simd', 'omp for', 'omp for simd' etc....
Definition: StmtOpenMP.h:1004
This represents '#pragma omp masked' directive.
Definition: StmtOpenMP.h:6013
This represents '#pragma omp master' directive.
Definition: StmtOpenMP.h:2028
This represents '#pragma omp master taskloop' directive.
Definition: StmtOpenMP.h:3854
This represents '#pragma omp master taskloop simd' directive.
Definition: StmtOpenMP.h:4006
This represents '#pragma omp metadirective' directive.
Definition: StmtOpenMP.h:6064
This represents '#pragma omp ordered' directive.
Definition: StmtOpenMP.h:2893
This represents '#pragma omp parallel' directive.
Definition: StmtOpenMP.h:612
This represents '#pragma omp parallel for' directive.
Definition: StmtOpenMP.h:2147
This represents '#pragma omp parallel for simd' directive.
Definition: StmtOpenMP.h:2244
This represents '#pragma omp parallel masked' directive.
Definition: StmtOpenMP.h:2372
This represents '#pragma omp parallel master' directive.
Definition: StmtOpenMP.h:2309
This represents '#pragma omp parallel master taskloop' directive.
Definition: StmtOpenMP.h:4137
This represents '#pragma omp parallel master taskloop simd' directive.
Definition: StmtOpenMP.h:4293
This represents '#pragma omp parallel sections' directive.
Definition: StmtOpenMP.h:2436
Represents the '#pragma omp reverse' loop transformation directive.
Definition: StmtOpenMP.h:5704
This represents '#pragma omp scan' directive.
Definition: StmtOpenMP.h:5842
This represents '#pragma omp section' directive.
Definition: StmtOpenMP.h:1864
This represents '#pragma omp sections' directive.
Definition: StmtOpenMP.h:1787
This represents '#pragma omp simd' directive.
Definition: StmtOpenMP.h:1571
This represents '#pragma omp single' directive.
Definition: StmtOpenMP.h:1977
This represents '#pragma omp target data' directive.
Definition: StmtOpenMP.h:3206
This represents '#pragma omp target' directive.
Definition: StmtOpenMP.h:3152
This represents '#pragma omp target enter data' directive.
Definition: StmtOpenMP.h:3260
This represents '#pragma omp target exit data' directive.
Definition: StmtOpenMP.h:3315
This represents '#pragma omp target parallel' directive.
Definition: StmtOpenMP.h:3369
This represents '#pragma omp target parallel for' directive.
Definition: StmtOpenMP.h:3449
This represents '#pragma omp target parallel for simd' directive.
Definition: StmtOpenMP.h:4774
This represents '#pragma omp target parallel loop' directive.
Definition: StmtOpenMP.h:6370
This represents '#pragma omp target simd' directive.
Definition: StmtOpenMP.h:4841
This represents '#pragma omp target teams' directive.
Definition: StmtOpenMP.h:5199
This represents '#pragma omp target teams distribute' combined directive.
Definition: StmtOpenMP.h:5255
This represents '#pragma omp target teams distribute parallel for' combined directive.
Definition: StmtOpenMP.h:5322
This represents '#pragma omp target teams distribute parallel for simd' combined directive.
Definition: StmtOpenMP.h:5420
This represents '#pragma omp target teams distribute simd' combined directive.
Definition: StmtOpenMP.h:5490
This represents '#pragma omp target teams loop' directive.
Definition: StmtOpenMP.h:6230
This represents '#pragma omp target update' directive.
Definition: StmtOpenMP.h:4491
This represents '#pragma omp task' directive.
Definition: StmtOpenMP.h:2517
This represents '#pragma omp taskloop' directive.
Definition: StmtOpenMP.h:3715
This represents '#pragma omp taskloop simd' directive.
Definition: StmtOpenMP.h:3788
This represents '#pragma omp taskgroup' directive.
Definition: StmtOpenMP.h:2722
This represents '#pragma omp taskwait' directive.
Definition: StmtOpenMP.h:2671
This represents '#pragma omp taskyield' directive.
Definition: StmtOpenMP.h:2579
This represents '#pragma omp teams' directive.
Definition: StmtOpenMP.h:3544
This represents '#pragma omp teams distribute' directive.
Definition: StmtOpenMP.h:4906
This represents '#pragma omp teams distribute parallel for' composite directive.
Definition: StmtOpenMP.h:5106
This represents '#pragma omp teams distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:5040
This represents '#pragma omp teams distribute simd' combined directive.
Definition: StmtOpenMP.h:4972
This represents '#pragma omp teams loop' directive.
Definition: StmtOpenMP.h:6165
This represents the '#pragma omp tile' loop transformation directive.
Definition: StmtOpenMP.h:5548
This represents the '#pragma omp unroll' loop transformation directive.
Definition: StmtOpenMP.h:5630
This represents clause 'use_device_addr' in the '#pragma omp ...' directives.
This represents clause 'use_device_ptr' in the '#pragma omp ...' directives.
ObjCArrayLiteral - used for Objective-C array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:191
Represents Objective-C's @synchronized statement.
Definition: StmtObjC.h:303
Represents Objective-C's @throw statement.
Definition: StmtObjC.h:358
Represents Objective-C's @try ... @catch ... @finally statement.
Definition: StmtObjC.h:167
Represents Objective-C's @autoreleasepool Statement.
Definition: StmtObjC.h:394
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:127
ObjCContainerDecl - Represents a container for method declarations.
Definition: DeclObjC.h:947
ObjCDictionaryLiteral - AST node to represent Objective-C dictionary literals; as in: "name" : NSUserN...
Definition: ExprObjC.h:309
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents Objective-C's collection statement.
Definition: StmtObjC.h:23
ObjCImplementationDecl - Represents a class definition - this is where method definitions are specifi...
Definition: DeclObjC.h:2596
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIsaExpr - Represents X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1491
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:945
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:140
ObjCPropertyImplDecl - Represents implementation declaration of a property in a class or category imp...
Definition: DeclObjC.h:2804
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:505
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:51
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Definition: StmtOpenACC.h:132
This class represents a 'loop' construct.
Definition: StmtOpenACC.h:200
Represents a parameter to a function.
Definition: Decl.h:1722
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3187
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition: Expr.h:1991
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6497
A (possibly-)qualified type.
Definition: Type.h:941
@ DK_cxx_destructor
Definition: Type.h:1532
@ DK_nontrivial_c_struct
Definition: Type.h:1535
@ DK_objc_weak_lifetime
Definition: Type.h:1534
@ DK_objc_strong_lifetime
Definition: Type.h:1533
The collection of all-type qualifiers we support.
Definition: Type.h:319
Represents a struct/union/class.
Definition: Decl.h:4145
bool hasVolatileMember() const
Definition: Decl.h:4208
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5965
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3029
Represents a __leave statement.
Definition: Stmt.h:3728
Flags to identify the types for overloaded SVE builtins.
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
A trivial tuple used to represent a source range.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4417
Stmt - This represents one statement.
Definition: Stmt.h:84
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1306
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1308
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2398
Exposes information about the current target.
Definition: TargetInfo.h:218
Represents a declaration of a type.
Definition: Decl.h:3367
The base class of the type hierarchy.
Definition: Type.h:1829
bool isReferenceType() const
Definition: Type.h:8021
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:8540
UnaryOperator - This represents the unary-expressions (except sizeof and alignof),...
Definition: Expr.h:2188
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4701
Represents the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:667
QualType getType() const
Definition: Decl.h:678
Represents a variable declaration or definition.
Definition: Decl.h:879
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: Decl.cpp:2239
bool isLocalVarDeclOrParm() const
Similar to isLocalVarDecl but also includes parameters.
Definition: Decl.h:1210
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3795
Expr * getSizeExpr() const
Definition: Type.h:3814
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2594
Defines the clang::TargetInfo interface.
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< ComplexType > complexType
Matches C99 complex types.
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
Definition: OpenMPKinds.h:24
BinaryOperatorKind
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ CR_Default
Definition: CapturedStmt.h:17
OpenMPDistScheduleClauseKind
OpenMP attributes for 'dist_schedule' clause.
Definition: OpenMPKinds.h:103
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
@ Result
The result type of a method or function.
CXXDtorType
C++ destructor types.
Definition: ABI.h:33
const FunctionProtoType * T
@ Success
Template argument deduction was successful.
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
#define true
Definition: stdbool.h:25
#define false
Definition: stdbool.h:26
#define bool
Definition: stdbool.h:24
Structure with information about how a bitfield should be accessed.
llvm::SmallVector< llvm::AllocaInst * > Take()
CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth, unsigned Index)
Header for data within LifetimeExtendedCleanupStack.
unsigned Size
The size of the following cleanup object.
unsigned IsConditional
Whether this is a conditional cleanup.
MultiVersionResolverOption(llvm::Function *F, StringRef Arch, ArrayRef< StringRef > Feats)
struct clang::CodeGen::CodeGenFunction::MultiVersionResolverOption::Conds Conditions
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::OpenMPIRBuilder::InsertPointTy InsertPointTy
static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region that will be outlined in OpenMPIRBuilder::finalize().
static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable VD.
static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP, llvm::BasicBlock &FiniBB, llvm::Function *Fn, ArrayRef< llvm::Value * > Args)
static std::string getNameWithSeparators(ArrayRef< StringRef > Parts, StringRef FirstSeparator=".", StringRef Separator=".")
Get the platform-specific name separator.
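A hedged usage sketch, assuming the helper is invoked through the OMPBuilderCBHelpers struct shown above and that the parts are simply joined with the given separators:
// Hypothetical call; with the default "." separators the expected result
// would be "omp.region.body" (an assumption about the joining behavior).
std::string Name =
    clang::CodeGen::CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
        {"omp", "region", "body"});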
static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP)
Emit the Finalization for an OMP region.
static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region.
OMPBuilderCBHelpers & operator=(const OMPBuilderCBHelpers &)=delete
OMPBuilderCBHelpers(const OMPBuilderCBHelpers &)=delete
OMPTargetDataInfo(Address BasePointersArray, Address PointersArray, Address SizesArray, Address MappersArray, unsigned NumberOfTargetItems)
llvm::PointerUnion< const FunctionProtoType *, const ObjCMethodDecl * > P
Struct with all information about dynamic [sub]class needed to set vptr.
This structure provides a set of types that are commonly used during IR emission.
Helper class with most of the code for saving a value for a conditional expression cleanup.
static llvm::Value * restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, llvm::Value *value)
static bool needsSaving(llvm::Value *value)
Answer whether the given value needs extra work to be saved.
llvm::PointerIntPair< llvm::Value *, 1, bool > saved_type
static type restore(CodeGenFunction &CGF, saved_type value)
static type restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, type value)
static saved_type save(CodeGenFunction &CGF, type value)
static type restore(CodeGenFunction &CGF, saved_type value)
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function.
Definition: EHScopeStack.h:65
static saved_type save(CodeGenFunction &CGF, type value)
Definition: EHScopeStack.h:59
Scheduling data for loop-based OpenMP directives.
Definition: OpenMPKinds.h:179
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition: Thunk.h:157