clang 20.0.0git
CodeGenFunction.h
Go to the documentation of this file.
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the internal per-function state used for llvm translation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
15
16#include "CGBuilder.h"
17#include "CGDebugInfo.h"
18#include "CGLoopInfo.h"
19#include "CGValue.h"
20#include "CodeGenModule.h"
21#include "CodeGenPGO.h"
22#include "EHScopeStack.h"
23#include "VarBypassDetector.h"
24#include "clang/AST/CharUnits.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
31#include "clang/AST/Type.h"
32#include "clang/Basic/ABI.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
39#include "llvm/ADT/MapVector.h"
40#include "llvm/ADT/SmallVector.h"
41#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Transforms/Utils/SanitizerStats.h"
46#include <optional>
47
48namespace llvm {
49class BasicBlock;
50class LLVMContext;
51class MDNode;
52class SwitchInst;
53class Twine;
54class Value;
55class CanonicalLoopInfo;
56}
57
58namespace clang {
59class ASTContext;
60class CXXDestructorDecl;
61class CXXForRangeStmt;
62class CXXTryStmt;
63class Decl;
64class LabelDecl;
65class FunctionDecl;
66class FunctionProtoType;
67class LabelStmt;
68class ObjCContainerDecl;
69class ObjCInterfaceDecl;
70class ObjCIvarDecl;
71class ObjCMethodDecl;
72class ObjCImplementationDecl;
73class ObjCPropertyImplDecl;
74class TargetInfo;
75class VarDecl;
76class ObjCForCollectionStmt;
77class ObjCAtTryStmt;
78class ObjCAtThrowStmt;
79class ObjCAtSynchronizedStmt;
80class ObjCAutoreleasePoolStmt;
81class OMPUseDevicePtrClause;
82class OMPUseDeviceAddrClause;
83class SVETypeFlags;
84class OMPExecutableDirective;
85
86namespace analyze_os_log {
87class OSLogBufferLayout;
88}
89
90namespace CodeGen {
91class CodeGenTypes;
92class CGCallee;
93class CGFunctionInfo;
94class CGBlockInfo;
95class CGCXXABI;
96class BlockByrefHelpers;
97class BlockByrefInfo;
98class BlockFieldFlags;
99class RegionCodeGenTy;
100class TargetCodeGenInfo;
101struct OMPTaskDataTy;
102struct CGCoroData;
103
104/// The kind of evaluation to perform on values of a particular
105/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
106/// CGExprAgg?
107///
108/// TODO: should vectors maybe be split out into their own thing?
114
115#define LIST_SANITIZER_CHECKS \
116 SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
117 SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
118 SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
119 SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
120 SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
121 SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
122 SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
123 SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
124 SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
125 SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
126 SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
127 SANITIZER_CHECK(MissingReturn, missing_return, 0) \
128 SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
129 SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
130 SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
131 SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
132 SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
133 SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
134 SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
135 SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
136 SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
137 SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
138 SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
139 SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
140 SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) \
141 SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)
142
144#define SANITIZER_CHECK(Enum, Name, Version) Enum,
146#undef SANITIZER_CHECK
148
149/// Helper class with most of the code for saving a value for a
150/// conditional expression cleanup.
152 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
153
154 /// Answer whether the given value needs extra work to be saved.
155 static bool needsSaving(llvm::Value *value) {
156 if (!value)
157 return false;
158
159 // If it's not an instruction, we don't need to save.
160 if (!isa<llvm::Instruction>(value)) return false;
161
162 // If it's an instruction in the entry block, we don't need to save.
163 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
164 return (block != &block->getParent()->getEntryBlock());
165 }
166
167 static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
168 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
169};
170
171/// A partial specialization of DominatingValue for llvm::Values that
172/// might be llvm::Instructions.
173template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
174 typedef T *type;
176 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
177 }
178};
179
180/// A specialization of DominatingValue for Address.
181template <> struct DominatingValue<Address> {
182 typedef Address type;
183
184 struct saved_type {
186 llvm::Type *ElementType;
189 llvm::PointerType *EffectiveType;
190 };
191
192 static bool needsSaving(type value) {
195 return true;
196 return false;
197 }
198 static saved_type save(CodeGenFunction &CGF, type value) {
199 return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
200 value.getElementType(), value.getAlignment(),
201 DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
202 }
204 return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
205 value.ElementType, value.Alignment, CGPointerAuthInfo(),
206 DominatingLLVMValue::restore(CGF, value.Offset));
207 }
208};
209
210/// A specialization of DominatingValue for RValue.
211template <> struct DominatingValue<RValue> {
212 typedef RValue type;
214 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
215 AggregateAddress, ComplexAddress };
216 union {
217 struct {
219 } Vals;
221 };
222 LLVM_PREFERRED_TYPE(Kind)
223 unsigned K : 3;
224
226 : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
227
230 : Vals{Val1, Val2}, K(ComplexAddress) {}
231
232 saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
233 : AggregateAddr(AggregateAddr), K(K) {}
234
235 public:
236 static bool needsSaving(RValue value);
239
240 // implementations in CGCleanup.cpp
241 };
242
243 static bool needsSaving(type value) {
244 return saved_type::needsSaving(value);
245 }
246 static saved_type save(CodeGenFunction &CGF, type value) {
247 return saved_type::save(CGF, value);
248 }
250 return value.restore(CGF);
251 }
252};
253
254/// CodeGenFunction - This class organizes the per-function state that is used
255/// while generating LLVM code.
257 CodeGenFunction(const CodeGenFunction &) = delete;
258 void operator=(const CodeGenFunction &) = delete;
259
260 friend class CGCXXABI;
261public:
262 /// A jump destination is an abstract label, branching to which may
263 /// require a jump out through normal cleanups.
264 struct JumpDest {
265 JumpDest() : Block(nullptr), Index(0) {}
266 JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
267 unsigned Index)
268 : Block(Block), ScopeDepth(Depth), Index(Index) {}
269
270 bool isValid() const { return Block != nullptr; }
271 llvm::BasicBlock *getBlock() const { return Block; }
272 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
273 unsigned getDestIndex() const { return Index; }
274
275 // This should be used cautiously.
277 ScopeDepth = depth;
278 }
279
280 private:
281 llvm::BasicBlock *Block;
283 unsigned Index;
284 };
285
286 CodeGenModule &CGM; // Per-module state.
288
289 // For EH/SEH outlined funclets, this field points to parent's CGF
291
292 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
295
296 // Stores variables for which we can't generate correct lifetime markers
297 // because of jumps.
299
300 /// List of recently emitted OMPCanonicalLoops.
301 ///
302 /// Since OMPCanonicalLoops are nested inside other statements (in particular
303 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
304 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
305 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
306 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
307 /// this stack when done. Entering a new loop requires clearing this list; it
308 /// either means we start parsing a new loop nest (in which case the previous
309 /// loop nest goes out of scope) or a second loop in the same level in which
310 /// case it would be ambiguous into which of the two (or more) loops the loop
311 /// nest would extend.
313
314 /// Stack to track the Logical Operator recursion nest for MC/DC.
316
317 /// Stack to track the controlled convergence tokens.
319
320 /// Number of nested loop to be consumed by the last surrounding
321 /// loop-associated directive.
323
324 // CodeGen lambda for loops and support for ordered clause
325 typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
326 JumpDest)>
328 typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
329 const unsigned, const bool)>
331
332 // Codegen lambda for loop bounds in worksharing loop constructs
333 typedef llvm::function_ref<std::pair<LValue, LValue>(
336
337 // Codegen lambda for loop bounds in dispatch-based loop implementation
338 typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
340 Address UB)>
342
343 /// CGBuilder insert helper. This function is called after an
344 /// instruction is created using Builder.
345 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
346 llvm::BasicBlock::iterator InsertPt) const;
347
348 /// CurFuncDecl - Holds the Decl for the current outermost
349 /// non-closure context.
350 const Decl *CurFuncDecl = nullptr;
351 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
352 const Decl *CurCodeDecl = nullptr;
353 const CGFunctionInfo *CurFnInfo = nullptr;
355 llvm::Function *CurFn = nullptr;
356
357 /// Save Parameter Decl for coroutine.
359
360 // Holds coroutine data if the current function is a coroutine. We use a
361 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
362 // in this header.
363 struct CGCoroInfo {
364 std::unique_ptr<CGCoroData> Data;
365 bool InSuspendBlock = false;
366 CGCoroInfo();
367 ~CGCoroInfo();
368 };
370
371 bool isCoroutine() const {
372 return CurCoro.Data != nullptr;
373 }
374
375 bool inSuspendBlock() const {
377 }
378
379 // Holds FramePtr for await_suspend wrapper generation,
380 // so that __builtin_coro_frame call can be lowered
381 // directly to value of its second argument
383 llvm::Value *FramePtr = nullptr;
384 };
386
387 // Generates wrapper function for `llvm.coro.await.suspend.*` intrinisics.
388 // It encapsulates SuspendExpr in a function, to separate it's body
389 // from the main coroutine to avoid miscompilations. Intrinisic
390 // is lowered to this function call in CoroSplit pass
391 // Function signature is:
392 // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
393 // where type is one of (void, i1, ptr)
394 llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
395 Twine const &SuspendPointName,
396 CoroutineSuspendExpr const &S);
397
398 /// CurGD - The GlobalDecl for the current function being compiled.
400
401 /// PrologueCleanupDepth - The cleanup depth enclosing all the
402 /// cleanups associated with the parameters.
404
405 /// ReturnBlock - Unified return block.
407
408 /// ReturnValue - The temporary alloca to hold the return
409 /// value. This is invalid iff the function has no return value.
411
412 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
413 /// This is invalid if sret is not in use.
415
416 /// If a return statement is being visited, this holds the return statment's
417 /// result expression.
418 const Expr *RetExpr = nullptr;
419
420 /// Return true if a label was seen in the current scope.
422 if (CurLexicalScope)
423 return CurLexicalScope->hasLabels();
424 return !LabelMap.empty();
425 }
426
427 /// AllocaInsertPoint - This is an instruction in the entry block before which
428 /// we prefer to insert allocas.
429 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
430
431private:
432 /// PostAllocaInsertPt - This is a place in the prologue where code can be
433 /// inserted that will be dominated by all the static allocas. This helps
434 /// achieve two things:
435 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
436 /// 2. All other prologue code (which are dominated by static allocas) do
437 /// appear in the source order immediately after all static allocas.
438 ///
439 /// PostAllocaInsertPt will be lazily created when it is *really* required.
440 llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
441
442public:
443 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
444 /// immediately after AllocaInsertPt.
445 llvm::Instruction *getPostAllocaInsertPoint() {
446 if (!PostAllocaInsertPt) {
447 assert(AllocaInsertPt &&
448 "Expected static alloca insertion point at function prologue");
449 assert(AllocaInsertPt->getParent()->isEntryBlock() &&
450 "EBB should be entry block of the current code gen function");
451 PostAllocaInsertPt = AllocaInsertPt->clone();
452 PostAllocaInsertPt->setName("postallocapt");
453 PostAllocaInsertPt->insertAfter(AllocaInsertPt);
454 }
455
456 return PostAllocaInsertPt;
457 }
458
459 /// API for captured statement code generation.
461 public:
463 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
466 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
467
469 S.getCapturedRecordDecl()->field_begin();
470 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
471 E = S.capture_end();
472 I != E; ++I, ++Field) {
473 if (I->capturesThis())
474 CXXThisFieldDecl = *Field;
475 else if (I->capturesVariable())
476 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
477 else if (I->capturesVariableByCopy())
478 CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
479 }
480 }
481
482 virtual ~CGCapturedStmtInfo();
483
484 CapturedRegionKind getKind() const { return Kind; }
485
486 virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
487 // Retrieve the value of the context parameter.
488 virtual llvm::Value *getContextValue() const { return ThisValue; }
489
490 /// Lookup the captured field decl for a variable.
491 virtual const FieldDecl *lookup(const VarDecl *VD) const {
492 return CaptureFields.lookup(VD->getCanonicalDecl());
493 }
494
495 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
496 virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
497
498 static bool classof(const CGCapturedStmtInfo *) {
499 return true;
500 }
501
502 /// Emit the captured statement body.
503 virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
505 CGF.EmitStmt(S);
506 }
507
508 /// Get the name of the capture helper.
509 virtual StringRef getHelperName() const { return "__captured_stmt"; }
510
511 /// Get the CaptureFields
512 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
513 return CaptureFields;
514 }
515
516 private:
517 /// The kind of captured statement being generated.
519
520 /// Keep the map between VarDecl and FieldDecl.
521 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
522
523 /// The base address of the captured record, passed in as the first
524 /// argument of the parallel region function.
525 llvm::Value *ThisValue;
526
527 /// Captured 'this' type.
528 FieldDecl *CXXThisFieldDecl;
529 };
531
532 /// RAII for correct setting/restoring of CapturedStmtInfo.
534 private:
535 CodeGenFunction &CGF;
536 CGCapturedStmtInfo *PrevCapturedStmtInfo;
537 public:
539 CGCapturedStmtInfo *NewCapturedStmtInfo)
540 : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
541 CGF.CapturedStmtInfo = NewCapturedStmtInfo;
542 }
543 ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
544 };
545
546 /// An abstract representation of regular/ObjC call/message targets.
548 /// The function declaration of the callee.
549 const Decl *CalleeDecl;
550
551 public:
552 AbstractCallee() : CalleeDecl(nullptr) {}
553 AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
554 AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
555 bool hasFunctionDecl() const {
556 return isa_and_nonnull<FunctionDecl>(CalleeDecl);
557 }
558 const Decl *getDecl() const { return CalleeDecl; }
559 unsigned getNumParams() const {
560 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
561 return FD->getNumParams();
562 return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
563 }
564 const ParmVarDecl *getParamDecl(unsigned I) const {
565 if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
566 return FD->getParamDecl(I);
567 return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
568 }
569 };
570
571 /// Sanitizers enabled for this function.
573
574 /// True if CodeGen currently emits code implementing sanitizer checks.
575 bool IsSanitizerScope = false;
576
577 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
579 CodeGenFunction *CGF;
580 public:
583 };
584
585 /// In C++, whether we are code generating a thunk. This controls whether we
586 /// should emit cleanups.
587 bool CurFuncIsThunk = false;
588
589 /// In ARC, whether we should autorelease the return value.
590 bool AutoreleaseResult = false;
591
592 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
593 /// potentially set the return value.
594 bool SawAsmBlock = false;
595
597
598 /// True if the current function is an outlined SEH helper. This can be a
599 /// finally block or filter expression.
601
602 /// True if CodeGen currently emits code inside presereved access index
603 /// region.
605
606 /// True if the current statement has nomerge attribute.
608
609 /// True if the current statement has noinline attribute.
611
612 /// True if the current statement has always_inline attribute.
614
615 /// True if the current statement has noconvergent attribute.
617
618 // The CallExpr within the current statement that the musttail attribute
619 // applies to. nullptr if there is no 'musttail' on the current statement.
620 const CallExpr *MustTailCall = nullptr;
621
622 /// Returns true if a function must make progress, which means the
623 /// mustprogress attribute can be added.
625 if (CGM.getCodeGenOpts().getFiniteLoops() ==
627 return false;
628
629 // C++11 and later guarantees that a thread eventually will do one of the
630 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
631 // - terminate,
632 // - make a call to a library I/O function,
633 // - perform an access through a volatile glvalue, or
634 // - perform a synchronization operation or an atomic operation.
635 //
636 // Hence each function is 'mustprogress' in C++11 or later.
637 return getLangOpts().CPlusPlus11;
638 }
639
640 /// Returns true if a loop must make progress, which means the mustprogress
641 /// attribute can be added. \p HasConstantCond indicates whether the branch
642 /// condition is a known constant.
643 bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
644
646 llvm::Value *BlockPointer = nullptr;
647
648 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
650
651 /// A mapping from NRVO variables to the flags used to indicate
652 /// when the NRVO has been applied to this variable.
653 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
654
657
658 // A stack of cleanups which were added to EHStack but have to be deactivated
659 // later before being popped or emitted. These are usually deactivated on
660 // exiting a `CleanupDeactivationScope` scope. For instance, after a
661 // full-expr.
662 //
663 // These are specially useful for correctly emitting cleanups while
664 // encountering branches out of expression (through stmt-expr or coroutine
665 // suspensions).
668 llvm::Instruction *DominatingIP;
669 };
671
672 // Enters a new scope for capturing cleanups which are deferred to be
673 // deactivated, all of which will be deactivated once the scope is exited.
682
684 assert(!Deactivated && "Deactivating already deactivated scope");
686 for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
687 CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
688 Stack[I - 1].DominatingIP);
689 Stack[I - 1].DominatingIP->eraseFromParent();
690 }
691 Stack.resize(OldDeactivateCleanupStackSize);
692 Deactivated = true;
693 }
694
696 if (Deactivated)
697 return;
699 }
700 };
701
703
704 llvm::Instruction *CurrentFuncletPad = nullptr;
705
707 bool isRedundantBeforeReturn() override { return true; }
708
709 llvm::Value *Addr;
710 llvm::Value *Size;
711
712 public:
713 CallLifetimeEnd(RawAddress addr, llvm::Value *size)
714 : Addr(addr.getPointer()), Size(size) {}
715
716 void Emit(CodeGenFunction &CGF, Flags flags) override {
717 CGF.EmitLifetimeEnd(Size, Addr);
718 }
719 };
720
721 /// Header for data within LifetimeExtendedCleanupStack.
723 /// The size of the following cleanup object.
724 unsigned Size;
725 /// The kind of cleanup to push.
726 LLVM_PREFERRED_TYPE(CleanupKind)
728 /// Whether this is a conditional cleanup.
729 LLVM_PREFERRED_TYPE(bool)
730 unsigned IsConditional : 1;
731
732 size_t getSize() const { return Size; }
733 CleanupKind getKind() const { return (CleanupKind)Kind; }
734 bool isConditional() const { return IsConditional; }
735 };
736
737 /// i32s containing the indexes of the cleanup destinations.
739
741
742 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
743 llvm::BasicBlock *EHResumeBlock = nullptr;
744
745 /// The exception slot. All landing pads write the current exception pointer
746 /// into this alloca.
747 llvm::Value *ExceptionSlot = nullptr;
748
749 /// The selector slot. Under the MandatoryCleanup model, all landing pads
750 /// write the current selector value into this alloca.
751 llvm::AllocaInst *EHSelectorSlot = nullptr;
752
753 /// A stack of exception code slots. Entering an __except block pushes a slot
754 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
755 /// a value from the top of the stack.
757
758 /// Value returned by __exception_info intrinsic.
759 llvm::Value *SEHInfo = nullptr;
760
761 /// Emits a landing pad for the current EH stack.
762 llvm::BasicBlock *EmitLandingPad();
763
764 llvm::BasicBlock *getInvokeDestImpl();
765
766 /// Parent loop-based directive for scan directive.
768 llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
769 llvm::BasicBlock *OMPAfterScanBlock = nullptr;
770 llvm::BasicBlock *OMPScanExitBlock = nullptr;
771 llvm::BasicBlock *OMPScanDispatch = nullptr;
772 bool OMPFirstScanLoop = false;
773
774 /// Manages parent directive for scan directives.
776 CodeGenFunction &CGF;
777 const OMPExecutableDirective *ParentLoopDirectiveForScan;
778
779 public:
781 CodeGenFunction &CGF,
782 const OMPExecutableDirective &ParentLoopDirectiveForScan)
783 : CGF(CGF),
784 ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
785 CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
786 }
788 CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
789 }
790 };
791
792 template <class T>
794 return DominatingValue<T>::save(*this, value);
795 }
796
798 public:
799 CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
802
803 private:
804 void ConstructorHelper(FPOptions FPFeatures);
805 CodeGenFunction &CGF;
806 FPOptions OldFPFeatures;
807 llvm::fp::ExceptionBehavior OldExcept;
808 llvm::RoundingMode OldRounding;
809 std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
810 };
812
813public:
814 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
815 /// rethrows.
817
818 /// A class controlling the emission of a finally block.
820 /// Where the catchall's edge through the cleanup should go.
821 JumpDest RethrowDest;
822
823 /// A function to call to enter the catch.
824 llvm::FunctionCallee BeginCatchFn;
825
826 /// An i1 variable indicating whether or not the @finally is
827 /// running for an exception.
828 llvm::AllocaInst *ForEHVar = nullptr;
829
830 /// An i8* variable into which the exception pointer to rethrow
831 /// has been saved.
832 llvm::AllocaInst *SavedExnVar = nullptr;
833
834 public:
835 void enter(CodeGenFunction &CGF, const Stmt *Finally,
836 llvm::FunctionCallee beginCatchFn,
837 llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
838 void exit(CodeGenFunction &CGF);
839 };
840
841 /// Returns true inside SEH __try blocks.
842 bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
843
844 /// Returns true while emitting a cleanuppad.
845 bool isCleanupPadScope() const {
846 return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
847 }
848
849 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
850 /// current full-expression. Safe against the possibility that
851 /// we're currently inside a conditionally-evaluated expression.
852 template <class T, class... As>
853 void pushFullExprCleanup(CleanupKind kind, As... A) {
854 // If we're not in a conditional branch, or if none of the
855 // arguments requires saving, then use the unconditional cleanup.
857 return EHStack.pushCleanup<T>(kind, A...);
858
859 // Stash values in a tuple so we can guarantee the order of saves.
860 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
861 SavedTuple Saved{saveValueInCond(A)...};
862
863 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
864 EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
866 }
867
868 /// Queue a cleanup to be pushed after finishing the current full-expression,
869 /// potentially with an active flag.
870 template <class T, class... As>
873 return pushCleanupAfterFullExprWithActiveFlag<T>(
874 Kind, RawAddress::invalid(), A...);
875
876 RawAddress ActiveFlag = createCleanupActiveFlag();
877 assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
878 "cleanup active flag should never need saving");
879
880 typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
881 SavedTuple Saved{saveValueInCond(A)...};
882
883 typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
884 pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag, Saved);
885 }
886
887 template <class T, class... As>
889 RawAddress ActiveFlag, As... A) {
890 LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
891 ActiveFlag.isValid()};
892
895 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
896 (Header.IsConditional ? sizeof(ActiveFlag) : 0));
897
898 static_assert(sizeof(Header) % alignof(T) == 0,
899 "Cleanup will be allocated on misaligned address");
900 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
901 new (Buffer) LifetimeExtendedCleanupHeader(Header);
902 new (Buffer + sizeof(Header)) T(A...);
903 if (Header.IsConditional)
904 new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
905 }
906
907 // Push a cleanup onto EHStack and deactivate it later. It is usually
908 // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
909 // full expression).
910 template <class T, class... As>
912 // Placeholder dominating IP for this cleanup.
913 llvm::Instruction *DominatingIP =
914 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
915 EHStack.pushCleanup<T>(Kind, A...);
917 {EHStack.stable_begin(), DominatingIP});
918 }
919
920 /// Set up the last cleanup that was pushed as a conditional
921 /// full-expression cleanup.
924 }
925
928
929 /// PushDestructorCleanup - Push a cleanup to call the
930 /// complete-object destructor of an object of the given type at the
931 /// given address. Does nothing if T is not a C++ class type with a
932 /// non-trivial destructor.
934
935 /// PushDestructorCleanup - Push a cleanup to call the
936 /// complete-object variant of the given destructor on the object at
937 /// the given address.
939 Address Addr);
940
941 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
942 /// process all branch fixups.
943 void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
944 bool ForDeactivation = false);
945
946 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
947 /// The block cannot be reactivated. Pops it if it's the top of the
948 /// stack.
949 ///
950 /// \param DominatingIP - An instruction which is known to
951 /// dominate the current IP (if set) and which lies along
952 /// all paths of execution between the current IP and the
953 /// the point at which the cleanup comes into scope.
955 llvm::Instruction *DominatingIP);
956
957 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
958 /// Cannot be used to resurrect a deactivated cleanup.
959 ///
960 /// \param DominatingIP - An instruction which is known to
961 /// dominate the current IP (if set) and which lies along
962 /// all paths of execution between the current IP and the
963 /// the point at which the cleanup comes into scope.
965 llvm::Instruction *DominatingIP);
966
967 /// Enters a new scope for capturing cleanups, all of which
968 /// will be executed once the scope is exited.
970 EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
971 size_t LifetimeExtendedCleanupStackSize;
972 CleanupDeactivationScope DeactivateCleanups;
973 bool OldDidCallStackSave;
974 protected:
976 private:
977
978 RunCleanupsScope(const RunCleanupsScope &) = delete;
979 void operator=(const RunCleanupsScope &) = delete;
980
981 protected:
983
984 public:
985 /// Enter a new cleanup scope.
987 : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
988 CleanupStackDepth = CGF.EHStack.stable_begin();
989 LifetimeExtendedCleanupStackSize =
991 OldDidCallStackSave = CGF.DidCallStackSave;
992 CGF.DidCallStackSave = false;
993 OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
994 CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
995 }
996
997 /// Exit this cleanup scope, emitting any accumulated cleanups.
999 if (PerformCleanup)
1000 ForceCleanup();
1001 }
1002
1003 /// Determine whether this scope requires any cleanups.
1004 bool requiresCleanups() const {
1005 return CGF.EHStack.stable_begin() != CleanupStackDepth;
1006 }
1007
1008 /// Force the emission of cleanups now, instead of waiting
1009 /// until this object is destroyed.
1010 /// \param ValuesToReload - A list of values that need to be available at
1011 /// the insertion point after cleanup emission. If cleanup emission created
1012 /// a shared cleanup block, these value pointers will be rewritten.
1013 /// Otherwise, they not will be modified.
1014 void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
1015 assert(PerformCleanup && "Already forced cleanup");
1016 CGF.DidCallStackSave = OldDidCallStackSave;
1017 DeactivateCleanups.ForceDeactivate();
1018 CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
1019 ValuesToReload);
1020 PerformCleanup = false;
1021 CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
1022 }
1023 };
1024
1025 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
1028
1030 SourceRange Range;
1032 LexicalScope *ParentScope;
1033
1034 LexicalScope(const LexicalScope &) = delete;
1035 void operator=(const LexicalScope &) = delete;
1036
1037 public:
1038 /// Enter a new cleanup scope.
1040 : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
1041 CGF.CurLexicalScope = this;
1042 if (CGDebugInfo *DI = CGF.getDebugInfo())
1043 DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
1044 }
1045
1046 void addLabel(const LabelDecl *label) {
1047 assert(PerformCleanup && "adding label to dead scope?");
1048 Labels.push_back(label);
1049 }
1050
1051 /// Exit this cleanup scope, emitting any accumulated
1052 /// cleanups.
1054 if (CGDebugInfo *DI = CGF.getDebugInfo())
1055 DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
1056
1057 // If we should perform a cleanup, force them now. Note that
1058 // this ends the cleanup scope before rescoping any labels.
1059 if (PerformCleanup) {
1060 ApplyDebugLocation DL(CGF, Range.getEnd());
1061 ForceCleanup();
1062 }
1063 }
1064
1065 /// Force the emission of cleanups now, instead of waiting
1066 /// until this object is destroyed.
1068 CGF.CurLexicalScope = ParentScope;
1070
1071 if (!Labels.empty())
1072 rescopeLabels();
1073 }
1074
1075 bool hasLabels() const {
1076 return !Labels.empty();
1077 }
1078
1079 void rescopeLabels();
1080 };
1081
1082 typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
1083
1084 /// The class used to assign some variables some temporarily addresses.
1086 DeclMapTy SavedLocals;
1087 DeclMapTy SavedTempAddresses;
1088 OMPMapVars(const OMPMapVars &) = delete;
1089 void operator=(const OMPMapVars &) = delete;
1090
1091 public:
1092 explicit OMPMapVars() = default;
1094 assert(SavedLocals.empty() && "Did not restored original addresses.");
1095 };
1096
1097 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1098 /// function \p CGF.
1099 /// \return true if at least one variable was set already, false otherwise.
1100 bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
1101 Address TempAddr) {
1102 LocalVD = LocalVD->getCanonicalDecl();
1103 // Only save it once.
1104 if (SavedLocals.count(LocalVD)) return false;
1105
1106 // Copy the existing local entry to SavedLocals.
1107 auto it = CGF.LocalDeclMap.find(LocalVD);
1108 if (it != CGF.LocalDeclMap.end())
1109 SavedLocals.try_emplace(LocalVD, it->second);
1110 else
1111 SavedLocals.try_emplace(LocalVD, Address::invalid());
1112
1113 // Generate the private entry.
1114 QualType VarTy = LocalVD->getType();
1115 if (VarTy->isReferenceType()) {
1116 Address Temp = CGF.CreateMemTemp(VarTy);
1117 CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
1118 TempAddr = Temp;
1119 }
1120 SavedTempAddresses.try_emplace(LocalVD, TempAddr);
1121
1122 return true;
1123 }
1124
1125 /// Applies new addresses to the list of the variables.
1126 /// \return true if at least one variable is using new address, false
1127 /// otherwise.
1129 copyInto(SavedTempAddresses, CGF.LocalDeclMap);
1130 SavedTempAddresses.clear();
1131 return !SavedLocals.empty();
1132 }
1133
1134 /// Restores original addresses of the variables.
1136 if (!SavedLocals.empty()) {
1137 copyInto(SavedLocals, CGF.LocalDeclMap);
1138 SavedLocals.clear();
1139 }
1140 }
1141
1142 private:
1143 /// Copy all the entries in the source map over the corresponding
1144 /// entries in the destination, which must exist.
1145 static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
1146 for (auto &[Decl, Addr] : Src) {
1147 if (!Addr.isValid())
1148 Dest.erase(Decl);
1149 else
1150 Dest.insert_or_assign(Decl, Addr);
1151 }
1152 }
1153 };
1154
1155 /// The scope used to remap some variables as private in the OpenMP loop body
1156 /// (or other captured region emitted without outlining), and to restore old
1157 /// vars back on exit.
1159 OMPMapVars MappedVars;
1160 OMPPrivateScope(const OMPPrivateScope &) = delete;
1161 void operator=(const OMPPrivateScope &) = delete;
1162
1163 public:
1164 /// Enter a new OpenMP private scope.
1166
1167 /// Registers \p LocalVD variable as a private with \p Addr as the address
1168 /// of the corresponding private variable. \p
1169 /// PrivateGen is the address of the generated private variable.
1170 /// \return true if the variable is registered as private, false if it has
1171 /// been privatized already.
1172 bool addPrivate(const VarDecl *LocalVD, Address Addr) {
1173 assert(PerformCleanup && "adding private to dead scope");
1174 return MappedVars.setVarAddr(CGF, LocalVD, Addr);
1175 }
1176
1177 /// Privatizes local variables previously registered as private.
1178 /// Registration is separate from the actual privatization to allow
1179 /// initializers use values of the original variables, not the private one.
1180 /// This is important, for example, if the private variable is a class
1181 /// variable initialized by a constructor that references other private
1182 /// variables. But at initialization original variables must be used, not
1183 /// private copies.
1184 /// \return true if at least one variable was privatized, false otherwise.
1185 bool Privatize() { return MappedVars.apply(CGF); }
1186
1189 restoreMap();
1190 }
1191
1192 /// Exit scope - all the mapped variables are restored.
1194 if (PerformCleanup)
1195 ForceCleanup();
1196 }
1197
1198 /// Checks if the global variable is captured in current function.
1199 bool isGlobalVarCaptured(const VarDecl *VD) const {
1200 VD = VD->getCanonicalDecl();
1201 return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
1202 }
1203
1204 /// Restore all mapped variables w/o clean up. This is usefully when we want
1205 /// to reference the original variables but don't want the clean up because
1206 /// that could emit lifetime end too early, causing backend issue #56913.
1207 void restoreMap() { MappedVars.restore(CGF); }
1208 };
1209
1210 /// Save/restore original map of previously emitted local vars in case when we
1211 /// need to duplicate emission of the same code several times in the same
1212 /// function for OpenMP code.
1214 CodeGenFunction &CGF;
1215 DeclMapTy SavedMap;
1216
1217 public:
1219 : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
1220 ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
1221 };
1222
1223 /// Takes the old cleanup stack size and emits the cleanup blocks
1224 /// that have been added.
1225 void
1227 std::initializer_list<llvm::Value **> ValuesToReload = {});
1228
1229 /// Takes the old cleanup stack size and emits the cleanup blocks
1230 /// that have been added, then adds all lifetime-extended cleanups from
1231 /// the given position to the stack.
1232 void
1234 size_t OldLifetimeExtendedStackSize,
1235 std::initializer_list<llvm::Value **> ValuesToReload = {});
1236
1237 void ResolveBranchFixups(llvm::BasicBlock *Target);
1238
1239 /// The given basic block lies in the current EH scope, but may be a
1240 /// target of a potentially scope-crossing jump; get a stable handle
1241 /// to which we can perform this jump later.
1243 return JumpDest(Target,
1246 }
1247
1248 /// The given basic block lies in the current EH scope, but may be a
1249 /// target of a potentially scope-crossing jump; get a stable handle
1250 /// to which we can perform this jump later.
1251 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
1253 }
1254
1255 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1256 /// block through the normal cleanup handling code (if any) and then
1257 /// on to \arg Dest.
1259
1260 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1261 /// specified destination obviously has no cleanups to run. 'false' is always
1262 /// a conservatively correct answer for this method.
1264
1265 /// popCatchScope - Pops the catch scope at the top of the EHScope
1266 /// stack, emitting any required code (other than the catch handlers
1267 /// themselves).
1269
1270 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
1272 llvm::BasicBlock *
1274
1275 /// An object to manage conditionally-evaluated expressions.
1277 llvm::BasicBlock *StartBB;
1278
1279 public:
1281 : StartBB(CGF.Builder.GetInsertBlock()) {}
1282
1284 assert(CGF.OutermostConditional != this);
1285 if (!CGF.OutermostConditional)
1286 CGF.OutermostConditional = this;
1287 }
1288
1290 assert(CGF.OutermostConditional != nullptr);
1291 if (CGF.OutermostConditional == this)
1292 CGF.OutermostConditional = nullptr;
1293 }
1294
1295 /// Returns a block which will be executed prior to each
1296 /// evaluation of the conditional code.
1297 llvm::BasicBlock *getStartingBlock() const {
1298 return StartBB;
1299 }
1300 };
1301
1302 /// isInConditionalBranch - Return true if we're currently emitting
1303 /// one branch or the other of a conditional expression.
1304 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
1305
1306 void setBeforeOutermostConditional(llvm::Value *value, Address addr,
1307 CodeGenFunction &CGF) {
1308 assert(isInConditionalBranch());
1309 llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
1310 auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF),
1311 block->back().getIterator());
1312 store->setAlignment(addr.getAlignment().getAsAlign());
1313 }
1314
1315 /// An RAII object to record that we're evaluating a statement
1316 /// expression.
1318 CodeGenFunction &CGF;
1319
1320 /// We have to save the outermost conditional: cleanups in a
1321 /// statement expression aren't conditional just because the
1322 /// StmtExpr is.
1323 ConditionalEvaluation *SavedOutermostConditional;
1324
1325 public:
1327 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
1328 CGF.OutermostConditional = nullptr;
1329 }
1330
1332 CGF.OutermostConditional = SavedOutermostConditional;
1333 CGF.EnsureInsertPoint();
1334 }
1335 };
1336
1337 /// An object which temporarily prevents a value from being
1338 /// destroyed by aggressive peephole optimizations that assume that
1339 /// all uses of a value have been realized in the IR.
1341 llvm::Instruction *Inst = nullptr;
1342 friend class CodeGenFunction;
1343
1344 public:
1346 };
1347
1348 /// A non-RAII class containing all the information about a bound
1349 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1350 /// this which makes individual mappings very simple; using this
1351 /// class directly is useful when you have a variable number of
1352 /// opaque values or don't want the RAII functionality for some
1353 /// reason.
1355 const OpaqueValueExpr *OpaqueValue;
1356 bool BoundLValue;
1358
1360 bool boundLValue)
1361 : OpaqueValue(ov), BoundLValue(boundLValue) {}
1362 public:
1363 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1364
1365 static bool shouldBindAsLValue(const Expr *expr) {
1366 // gl-values should be bound as l-values for obvious reasons.
1367 // Records should be bound as l-values because IR generation
1368 // always keeps them in memory. Expressions of function type
1369 // act exactly like l-values but are formally required to be
1370 // r-values in C.
1371 return expr->isGLValue() ||
1372 expr->getType()->isFunctionType() ||
1373 hasAggregateEvaluationKind(expr->getType());
1374 }
1375
1377 const OpaqueValueExpr *ov,
1378 const Expr *e) {
1379 if (shouldBindAsLValue(ov))
1380 return bind(CGF, ov, CGF.EmitLValue(e));
1381 return bind(CGF, ov, CGF.EmitAnyExpr(e));
1382 }
1383
1385 const OpaqueValueExpr *ov,
1386 const LValue &lv) {
1387 assert(shouldBindAsLValue(ov));
1388 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1389 return OpaqueValueMappingData(ov, true);
1390 }
1391
1393 const OpaqueValueExpr *ov,
1394 const RValue &rv) {
1395 assert(!shouldBindAsLValue(ov));
1396 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1397
1398 OpaqueValueMappingData data(ov, false);
1399
1400 // Work around an extremely aggressive peephole optimization in
1401 // EmitScalarConversion which assumes that all other uses of a
1402 // value are extant.
1403 data.Protection = CGF.protectFromPeepholes(rv);
1404
1405 return data;
1406 }
1407
1408 bool isValid() const { return OpaqueValue != nullptr; }
1409 void clear() { OpaqueValue = nullptr; }
1410
1412 assert(OpaqueValue && "no data to unbind!");
1413
1414 if (BoundLValue) {
1415 CGF.OpaqueLValues.erase(OpaqueValue);
1416 } else {
1417 CGF.OpaqueRValues.erase(OpaqueValue);
1418 CGF.unprotectFromPeepholes(Protection);
1419 }
1420 }
1421 };
1422
1423 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1425 CodeGenFunction &CGF;
1427
1428 public:
1429 static bool shouldBindAsLValue(const Expr *expr) {
1431 }
1432
1433 /// Build the opaque value mapping for the given conditional
1434 /// operator if it's the GNU ?: extension. This is a common
1435 /// enough pattern that the convenience operator is really
1436 /// helpful.
1437 ///
1439 const AbstractConditionalOperator *op) : CGF(CGF) {
1440 if (isa<ConditionalOperator>(op))
1441 // Leave Data empty.
1442 return;
1443
1444 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1446 e->getCommon());
1447 }
1448
1449 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1450 /// expression is set to the expression the OVE represents.
1452 : CGF(CGF) {
1453 if (OV) {
1454 assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1455 "for OVE with no source expression");
1457 }
1458 }
1459
1461 const OpaqueValueExpr *opaqueValue,
1462 LValue lvalue)
1463 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1464 }
1465
1467 const OpaqueValueExpr *opaqueValue,
1468 RValue rvalue)
1469 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1470 }
1471
1472 void pop() {
1473 Data.unbind(CGF);
1474 Data.clear();
1475 }
1476
1478 if (Data.isValid()) Data.unbind(CGF);
1479 }
1480 };
1481
1482private:
1483 CGDebugInfo *DebugInfo;
1484 /// Used to create unique names for artificial VLA size debug info variables.
1485 unsigned VLAExprCounter = 0;
1486 bool DisableDebugInfo = false;
1487
1488 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1489 /// calling llvm.stacksave for multiple VLAs in the same scope.
1490 bool DidCallStackSave = false;
1491
1492 /// IndirectBranch - The first time an indirect goto is seen we create a block
1493 /// with an indirect branch. Every time we see the address of a label taken,
1494 /// we add the label to the indirect goto. Every subsequent indirect goto is
1495 /// codegen'd as a jump to the IndirectBranch's basic block.
1496 llvm::IndirectBrInst *IndirectBranch = nullptr;
1497
1498 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1499 /// decls.
1500 DeclMapTy LocalDeclMap;
1501
1502 // Keep track of the cleanups for callee-destructed parameters pushed to the
1503 // cleanup stack so that they can be deactivated later.
1504 llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
1505 CalleeDestructedParamCleanups;
1506
1507 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1508 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1509 /// parameter.
1510 llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
1511 SizeArguments;
1512
1513 /// Track escaped local variables with auto storage. Used during SEH
1514 /// outlining to produce a call to llvm.localescape.
1515 llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
1516
1517 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1518 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1519
1520 // BreakContinueStack - This keeps track of where break and continue
1521 // statements should jump to.
1522 struct BreakContinue {
1523 BreakContinue(JumpDest Break, JumpDest Continue)
1524 : BreakBlock(Break), ContinueBlock(Continue) {}
1525
1526 JumpDest BreakBlock;
1527 JumpDest ContinueBlock;
1528 };
1529 SmallVector<BreakContinue, 8> BreakContinueStack;
1530
1531 /// Handles cancellation exit points in OpenMP-related constructs.
1532 class OpenMPCancelExitStack {
1533 /// Tracks cancellation exit point and join point for cancel-related exit
1534 /// and normal exit.
1535 struct CancelExit {
1536 CancelExit() = default;
1537 CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
1538 JumpDest ContBlock)
1539 : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
1540 OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
1541 /// true if the exit block has been emitted already by the special
1542 /// emitExit() call, false if the default codegen is used.
1543 bool HasBeenEmitted = false;
1544 JumpDest ExitBlock;
1545 JumpDest ContBlock;
1546 };
1547
1548 SmallVector<CancelExit, 8> Stack;
1549
1550 public:
1551 OpenMPCancelExitStack() : Stack(1) {}
1552 ~OpenMPCancelExitStack() = default;
1553 /// Fetches the exit block for the current OpenMP construct.
1554 JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
1555 /// Emits exit block with special codegen procedure specific for the related
1556 /// OpenMP construct + emits code for normal construct cleanup.
1557 void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
1558 const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
1559 if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
1560 assert(CGF.getOMPCancelDestination(Kind).isValid());
1561 assert(CGF.HaveInsertPoint());
1562 assert(!Stack.back().HasBeenEmitted);
1563 auto IP = CGF.Builder.saveAndClearIP();
1564 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1565 CodeGen(CGF);
1566 CGF.EmitBranch(Stack.back().ContBlock.getBlock());
1567 CGF.Builder.restoreIP(IP);
1568 Stack.back().HasBeenEmitted = true;
1569 }
1570 CodeGen(CGF);
1571 }
1572 /// Enter the cancel supporting \a Kind construct.
1573 /// \param Kind OpenMP directive that supports cancel constructs.
1574 /// \param HasCancel true, if the construct has inner cancel directive,
1575 /// false otherwise.
1576 void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
1577 Stack.push_back({Kind,
1578 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
1579 : JumpDest(),
1580 HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
1581 : JumpDest()});
1582 }
1583 /// Emits default exit point for the cancel construct (if the special one
1584 /// has not be used) + join point for cancel/normal exits.
1585 void exit(CodeGenFunction &CGF) {
1586 if (getExitBlock().isValid()) {
1587 assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
1588 bool HaveIP = CGF.HaveInsertPoint();
1589 if (!Stack.back().HasBeenEmitted) {
1590 if (HaveIP)
1591 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1592 CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
1593 CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
1594 }
1595 CGF.EmitBlock(Stack.back().ContBlock.getBlock());
1596 if (!HaveIP) {
1597 CGF.Builder.CreateUnreachable();
1598 CGF.Builder.ClearInsertionPoint();
1599 }
1600 }
1601 Stack.pop_back();
1602 }
1603 };
1604 OpenMPCancelExitStack OMPCancelStack;
1605
1606 /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
1607 llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
1608 Stmt::Likelihood LH);
1609
1610 CodeGenPGO PGO;
1611
1612 /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
1613 Address MCDCCondBitmapAddr = Address::invalid();
1614
1615 /// Calculate branch weights appropriate for PGO data
1616 llvm::MDNode *createProfileWeights(uint64_t TrueCount,
1617 uint64_t FalseCount) const;
1618 llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
1619 llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
1620 uint64_t LoopCount) const;
1621
1622public:
1623 /// Increment the profiler's counter for the given statement by \p StepV.
1624 /// If \p StepV is null, the default increment is 1.
1625 void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
1627 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
1628 !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile)) {
1629 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1630 PGO.emitCounterSetOrIncrement(Builder, S, StepV);
1631 }
1632 PGO.setCurrentStmt(S);
1633 }
1634
1637 CGM.getCodeGenOpts().MCDCCoverage &&
1638 !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
1639 }
1640
1641 /// Allocate a temp value on the stack that MCDC can use to track condition
1642 /// results.
1644 if (isMCDCCoverageEnabled()) {
1645 PGO.emitMCDCParameters(Builder);
1646 MCDCCondBitmapAddr =
1647 CreateIRTemp(getContext().UnsignedIntTy, "mcdc.addr");
1648 }
1649 }
1650
1651 bool isBinaryLogicalOp(const Expr *E) const {
1652 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
1653 return (BOp && BOp->isLogicalOp());
1654 }
1655
1656 /// Zero-init the MCDC temp value.
1659 PGO.emitMCDCCondBitmapReset(Builder, E, MCDCCondBitmapAddr);
1660 PGO.setCurrentStmt(E);
1661 }
1662 }
1663
1664 /// Increment the profiler's counter for the given expression by \p StepV.
1665 /// If \p StepV is null, the default increment is 1.
1668 PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
1669 PGO.setCurrentStmt(E);
1670 }
1671 }
1672
1673 /// Update the MCDC temp value with the condition's evaluated result.
1674 void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
1675 if (isMCDCCoverageEnabled()) {
1676 PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
1677 PGO.setCurrentStmt(E);
1678 }
1679 }
1680
1681 /// Get the profiler's count for the given statement.
1682 uint64_t getProfileCount(const Stmt *S) {
1683 return PGO.getStmtCount(S).value_or(0);
1684 }
1685
1686 /// Set the profiler's current count.
1687 void setCurrentProfileCount(uint64_t Count) {
1688 PGO.setCurrentRegionCount(Count);
1689 }
1690
1691 /// Get the profiler's current count. This is generally the count for the most
1692 /// recently incremented counter.
1694 return PGO.getCurrentRegionCount();
1695 }
1696
1697private:
1698
1699 /// SwitchInsn - This is nearest current switch instruction. It is null if
1700 /// current context is not in a switch.
1701 llvm::SwitchInst *SwitchInsn = nullptr;
1702 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1703 SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
1704
1705 /// The likelihood attributes of the SwitchCase.
1706 SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;
1707
1708 /// CaseRangeBlock - This block holds if condition check for last case
1709 /// statement range in current switch instruction.
1710 llvm::BasicBlock *CaseRangeBlock = nullptr;
1711
1712 /// OpaqueLValues - Keeps track of the current set of opaque value
1713 /// expressions.
1714 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1715 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1716
1717 // VLASizeMap - This keeps track of the associated size for each VLA type.
1718 // We track this by the size expression rather than the type itself because
1719 // in certain situations, like a const qualifier applied to an VLA typedef,
1720 // multiple VLA types can share the same size expression.
1721 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1722 // enter/leave scopes.
1723 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1724
1725 /// A block containing a single 'unreachable' instruction. Created
1726 /// lazily by getUnreachableBlock().
1727 llvm::BasicBlock *UnreachableBlock = nullptr;
1728
1729 /// Counts of the number return expressions in the function.
1730 unsigned NumReturnExprs = 0;
1731
1732 /// Count the number of simple (constant) return expressions in the function.
1733 unsigned NumSimpleReturnExprs = 0;
1734
1735 /// The last regular (non-return) debug location (breakpoint) in the function.
1736 SourceLocation LastStopPoint;
1737
1738public:
1739 /// Source location information about the default argument or member
1740 /// initializer expression we're evaluating, if any.
1744
1745 /// A scope within which we are constructing the fields of an object which
1746 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1747 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1749 public:
1751 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
1752 CGF.CXXDefaultInitExprThis = This;
1753 }
1755 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
1756 }
1757
1758 private:
1759 CodeGenFunction &CGF;
1760 Address OldCXXDefaultInitExprThis;
1761 };
1762
1763 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1764 /// is overridden to be the object under construction.
1766 public:
1768 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
1769 OldCXXThisAlignment(CGF.CXXThisAlignment),
1771 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
1772 CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
1773 }
1775 CGF.CXXThisValue = OldCXXThisValue;
1776 CGF.CXXThisAlignment = OldCXXThisAlignment;
1777 }
1778
1779 public:
1781 llvm::Value *OldCXXThisValue;
1784 };
1785
1789 };
1790
1791 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1792 /// current loop index is overridden.
1794 public:
1795 ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
1796 : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
1797 CGF.ArrayInitIndex = Index;
1798 }
1800 CGF.ArrayInitIndex = OldArrayInitIndex;
1801 }
1802
1803 private:
1804 CodeGenFunction &CGF;
1805 llvm::Value *OldArrayInitIndex;
1806 };
1807
1809 public:
1811 : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
1812 OldCurCodeDecl(CGF.CurCodeDecl),
1813 OldCXXABIThisDecl(CGF.CXXABIThisDecl),
1814 OldCXXABIThisValue(CGF.CXXABIThisValue),
1815 OldCXXThisValue(CGF.CXXThisValue),
1816 OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
1817 OldCXXThisAlignment(CGF.CXXThisAlignment),
1818 OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
1819 OldCXXInheritedCtorInitExprArgs(
1820 std::move(CGF.CXXInheritedCtorInitExprArgs)) {
1821 CGF.CurGD = GD;
1822 CGF.CurFuncDecl = CGF.CurCodeDecl =
1823 cast<CXXConstructorDecl>(GD.getDecl());
1824 CGF.CXXABIThisDecl = nullptr;
1825 CGF.CXXABIThisValue = nullptr;
1826 CGF.CXXThisValue = nullptr;
1827 CGF.CXXABIThisAlignment = CharUnits();
1828 CGF.CXXThisAlignment = CharUnits();
1830 CGF.FnRetTy = QualType();
1831 CGF.CXXInheritedCtorInitExprArgs.clear();
1832 }
1834 CGF.CurGD = OldCurGD;
1835 CGF.CurFuncDecl = OldCurFuncDecl;
1836 CGF.CurCodeDecl = OldCurCodeDecl;
1837 CGF.CXXABIThisDecl = OldCXXABIThisDecl;
1838 CGF.CXXABIThisValue = OldCXXABIThisValue;
1839 CGF.CXXThisValue = OldCXXThisValue;
1840 CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
1841 CGF.CXXThisAlignment = OldCXXThisAlignment;
1842 CGF.ReturnValue = OldReturnValue;
1843 CGF.FnRetTy = OldFnRetTy;
1844 CGF.CXXInheritedCtorInitExprArgs =
1845 std::move(OldCXXInheritedCtorInitExprArgs);
1846 }
1847
1848 private:
1849 CodeGenFunction &CGF;
1850 GlobalDecl OldCurGD;
1851 const Decl *OldCurFuncDecl;
1852 const Decl *OldCurCodeDecl;
1853 ImplicitParamDecl *OldCXXABIThisDecl;
1854 llvm::Value *OldCXXABIThisValue;
1855 llvm::Value *OldCXXThisValue;
1856 CharUnits OldCXXABIThisAlignment;
1857 CharUnits OldCXXThisAlignment;
1858 Address OldReturnValue;
1859 QualType OldFnRetTy;
1860 CallArgList OldCXXInheritedCtorInitExprArgs;
1861 };
1862
1863 // Helper class for the OpenMP IR Builder. Allows reusability of code used for
1864 // region body, and finalization codegen callbacks. This will class will also
1865 // contain privatization functions used by the privatization call backs
1866 //
1867 // TODO: this is temporary class for things that are being moved out of
1868 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1869 // utility function for use with the OMPBuilder. Once that move to use the
1870 // OMPBuilder is done, everything here will either become part of CodeGenFunc.
1871 // directly, or a new helper class that will contain functions used by both
1872 // this and the OMPBuilder
1873
1875
1879
1880 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1881
1882 /// Cleanup action for allocate support.
1884
1885 private:
1886 llvm::CallInst *RTLFnCI;
1887
1888 public:
1889 OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
1890 RLFnCI->removeFromParent();
1891 }
1892
1893 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
1894 if (!CGF.HaveInsertPoint())
1895 return;
1896 CGF.Builder.Insert(RTLFnCI);
1897 }
1898 };
1899
1900 /// Returns address of the threadprivate variable for the current
1901 /// thread. This Also create any necessary OMP runtime calls.
1902 ///
1903 /// \param VD VarDecl for Threadprivate variable.
1904 /// \param VDAddr Address of the Vardecl
1905 /// \param Loc The location where the barrier directive was encountered
1907 const VarDecl *VD, Address VDAddr,
1909
1910 /// Gets the OpenMP-specific address of the local variable /p VD.
1912 const VarDecl *VD);
1913 /// Get the platform-specific name separator.
1914 /// \param Parts different parts of the final name that needs separation
1915 /// \param FirstSeparator First separator used between the initial two
1916 /// parts of the name.
1917 /// \param Separator separator used between all of the rest consecutinve
1918 /// parts of the name
1919 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
1920 StringRef FirstSeparator = ".",
1921 StringRef Separator = ".");
1922 /// Emit the Finalization for an OMP region
1923 /// \param CGF The Codegen function this belongs to
1924 /// \param IP Insertion point for generating the finalization code.
1926 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1927 assert(IP.getBlock()->end() != IP.getPoint() &&
1928 "OpenMP IR Builder should cause terminated block!");
1929
1930 llvm::BasicBlock *IPBB = IP.getBlock();
1931 llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
1932 assert(DestBB && "Finalization block should have one successor!");
1933
1934 // erase and replace with cleanup branch.
1935 IPBB->getTerminator()->eraseFromParent();
1936 CGF.Builder.SetInsertPoint(IPBB);
1938 CGF.EmitBranchThroughCleanup(Dest);
1939 }
1940
1941 /// Emit the body of an OMP region
1942 /// \param CGF The Codegen function this belongs to
1943 /// \param RegionBodyStmt The body statement for the OpenMP region being
1944 /// generated
1945 /// \param AllocaIP Where to insert alloca instructions
1946 /// \param CodeGenIP Where to insert the region code
1947 /// \param RegionName Name to be used for new blocks
1949 const Stmt *RegionBodyStmt,
1950 InsertPointTy AllocaIP,
1951 InsertPointTy CodeGenIP,
1952 Twine RegionName);
1953
1954 static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
1955 llvm::BasicBlock &FiniBB, llvm::Function *Fn,
1957 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1958 if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
1959 CodeGenIPBBTI->eraseFromParent();
1960
1961 CGF.Builder.SetInsertPoint(CodeGenIPBB);
1962
1963 if (Fn->doesNotThrow())
1964 CGF.EmitNounwindRuntimeCall(Fn, Args);
1965 else
1966 CGF.EmitRuntimeCall(Fn, Args);
1967
1968 if (CGF.Builder.saveIP().isSet())
1969 CGF.Builder.CreateBr(&FiniBB);
1970 }
1971
1972 /// Emit the body of an OMP region that will be outlined in
1973 /// OpenMPIRBuilder::finalize().
1974 /// \param CGF The Codegen function this belongs to
1975 /// \param RegionBodyStmt The body statement for the OpenMP region being
1976 /// generated
1977 /// \param AllocaIP Where to insert alloca instructions
1978 /// \param CodeGenIP Where to insert the region code
1979 /// \param RegionName Name to be used for new blocks
1981 const Stmt *RegionBodyStmt,
1982 InsertPointTy AllocaIP,
1983 InsertPointTy CodeGenIP,
1984 Twine RegionName);
1985
1986 /// RAII for preserving necessary info during Outlined region body codegen.
1988
1989 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
1990 CodeGenFunction::JumpDest OldReturnBlock;
1991 CodeGenFunction &CGF;
1992
1993 public:
1995 llvm::BasicBlock &RetBB)
1996 : CGF(cgf) {
1997 assert(AllocaIP.isSet() &&
1998 "Must specify Insertion point for allocas of outlined function");
1999 OldAllocaIP = CGF.AllocaInsertPt;
2000 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2001
2002 OldReturnBlock = CGF.ReturnBlock;
2003 CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
2004 }
2005
2007 CGF.AllocaInsertPt = OldAllocaIP;
2008 CGF.ReturnBlock = OldReturnBlock;
2009 }
2010 };
2011
2012 /// RAII for preserving necessary info during inlined region body codegen.
2014
2015 llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
2016 CodeGenFunction &CGF;
2017
2018 public:
2020 llvm::BasicBlock &FiniBB)
2021 : CGF(cgf) {
2022 // Alloca insertion block should be in the entry block of the containing
2023 // function so it expects an empty AllocaIP in which case will reuse the
2024 // old alloca insertion point, or a new AllocaIP in the same block as
2025 // the old one
2026 assert((!AllocaIP.isSet() ||
2027 CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
2028 "Insertion point should be in the entry block of containing "
2029 "function!");
2030 OldAllocaIP = CGF.AllocaInsertPt;
2031 if (AllocaIP.isSet())
2032 CGF.AllocaInsertPt = &*AllocaIP.getPoint();
2033
2034 // TODO: Remove the call, after making sure the counter is not used by
2035 // the EHStack.
2036 // Since this is an inlined region, it should not modify the
2037 // ReturnBlock, and should reuse the one for the enclosing outlined
2038 // region. So, the JumpDest being return by the function is discarded
2039 (void)CGF.getJumpDestInCurrentScope(&FiniBB);
2040 }
2041
2043 };
2044 };
2045
2046private:
2047 /// CXXThisDecl - When generating code for a C++ member function,
2048 /// this will hold the implicit 'this' declaration.
2049 ImplicitParamDecl *CXXABIThisDecl = nullptr;
2050 llvm::Value *CXXABIThisValue = nullptr;
2051 llvm::Value *CXXThisValue = nullptr;
2052 CharUnits CXXABIThisAlignment;
2053 CharUnits CXXThisAlignment;
2054
2055 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
2056 /// this expression.
2057 Address CXXDefaultInitExprThis = Address::invalid();
2058
2059 /// The current array initialization index when evaluating an
2060 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
2061 llvm::Value *ArrayInitIndex = nullptr;
2062
2063 /// The values of function arguments to use when evaluating
2064 /// CXXInheritedCtorInitExprs within this context.
2065 CallArgList CXXInheritedCtorInitExprArgs;
2066
2067 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
2068 /// destructor, this will hold the implicit argument (e.g. VTT).
2069 ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
2070 llvm::Value *CXXStructorImplicitParamValue = nullptr;
2071
2072 /// OutermostConditional - Points to the outermost active
2073 /// conditional control. This is used so that we know if a
2074 /// temporary should be destroyed conditionally.
2075 ConditionalEvaluation *OutermostConditional = nullptr;
2076
2077 /// The current lexical scope.
2078 LexicalScope *CurLexicalScope = nullptr;
2079
2080 /// The current source location that should be used for exception
2081 /// handling code.
2082 SourceLocation CurEHLocation;
2083
2084 /// BlockByrefInfos - For each __block variable, contains
2085 /// information about the layout of the variable.
2086 llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
2087
2088 /// Used by -fsanitize=nullability-return to determine whether the return
2089 /// value can be checked.
2090 llvm::Value *RetValNullabilityPrecondition = nullptr;
2091
2092 /// Check if -fsanitize=nullability-return instrumentation is required for
2093 /// this function.
2094 bool requiresReturnValueNullabilityCheck() const {
2095 return RetValNullabilityPrecondition;
2096 }
2097
2098 /// Used to store precise source locations for return statements by the
2099 /// runtime return value checks.
2100 Address ReturnLocation = Address::invalid();
2101
2102 /// Check if the return value of this function requires sanitization.
2103 bool requiresReturnValueCheck() const;
2104
2105 bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
2106 bool hasInAllocaArg(const CXXMethodDecl *MD);
2107
2108 llvm::BasicBlock *TerminateLandingPad = nullptr;
2109 llvm::BasicBlock *TerminateHandler = nullptr;
2111
2112 /// Terminate funclets keyed by parent funclet pad.
2113 llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
2114
2115 /// Largest vector width used in ths function. Will be used to create a
2116 /// function attribute.
2117 unsigned LargestVectorWidth = 0;
2118
2119 /// True if we need emit the life-time markers. This is initially set in
2120 /// the constructor, but could be overwritten to true if this is a coroutine.
2121 bool ShouldEmitLifetimeMarkers;
2122
2123 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
2124 /// the function metadata.
2125 void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
2126
2127public:
2128 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
2130
2131 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
2132 ASTContext &getContext() const { return CGM.getContext(); }
2134 if (DisableDebugInfo)
2135 return nullptr;
2136 return DebugInfo;
2137 }
2138 void disableDebugInfo() { DisableDebugInfo = true; }
2139 void enableDebugInfo() { DisableDebugInfo = false; }
2140
2142 return CGM.getCodeGenOpts().OptimizationLevel == 0;
2143 }
2144
2145 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
2146
2147 /// Returns a pointer to the function's exception object and selector slot,
2148 /// which is assigned in every landing pad.
2151
2152 /// Returns the contents of the function's exception object and selector
2153 /// slots.
2154 llvm::Value *getExceptionFromSlot();
2155 llvm::Value *getSelectorFromSlot();
2156
2158
2159 llvm::BasicBlock *getUnreachableBlock() {
2160 if (!UnreachableBlock) {
2161 UnreachableBlock = createBasicBlock("unreachable");
2162 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
2163 }
2164 return UnreachableBlock;
2165 }
2166
2167 llvm::BasicBlock *getInvokeDest() {
2168 if (!EHStack.requiresLandingPad()) return nullptr;
2169 return getInvokeDestImpl();
2170 }
2171
2172 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
2173
2174 const TargetInfo &getTarget() const { return Target; }
2175 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
2177 return CGM.getTargetCodeGenInfo();
2178 }
2179
2180 //===--------------------------------------------------------------------===//
2181 // Cleanups
2182 //===--------------------------------------------------------------------===//
2183
2184 typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
2185
2186 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2187 Address arrayEndPointer,
2188 QualType elementType,
2189 CharUnits elementAlignment,
2190 Destroyer *destroyer);
2191 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2192 llvm::Value *arrayEnd,
2193 QualType elementType,
2194 CharUnits elementAlignment,
2195 Destroyer *destroyer);
2196
2198 Address addr, QualType type);
2200 Address addr, QualType type);
2202 Destroyer *destroyer, bool useEHCleanupForArray);
2204 Address addr, QualType type);
2206 QualType type, Destroyer *destroyer,
2207 bool useEHCleanupForArray);
2209 QualType type, Destroyer *destroyer,
2210 bool useEHCleanupForArray);
2211 void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
2212 llvm::Value *CompletePtr,
2213 QualType ElementType);
2216 std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
2218 bool useEHCleanupForArray);
2220 Destroyer *destroyer,
2221 bool useEHCleanupForArray,
2222 const VarDecl *VD);
2223 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
2224 QualType elementType, CharUnits elementAlign,
2225 Destroyer *destroyer,
2226 bool checkZeroLength, bool useEHCleanup);
2227
2229
2230 /// Determines whether an EH cleanup is required to destroy a type
2231 /// with the given destruction kind.
2233 switch (kind) {
2234 case QualType::DK_none:
2235 return false;
2239 return getLangOpts().Exceptions;
2241 return getLangOpts().Exceptions &&
2242 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
2243 }
2244 llvm_unreachable("bad destruction kind");
2245 }
2246
2249 }
2250
2251 //===--------------------------------------------------------------------===//
2252 // Objective-C
2253 //===--------------------------------------------------------------------===//
2254
2256
2258
2259 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2261 const ObjCPropertyImplDecl *PID);
2263 const ObjCPropertyImplDecl *propImpl,
2264 const ObjCMethodDecl *GetterMothodDecl,
2265 llvm::Constant *AtomicHelperFn);
2266
2268 ObjCMethodDecl *MD, bool ctor);
2269
2270 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2271 /// for the given property.
2273 const ObjCPropertyImplDecl *PID);
2275 const ObjCPropertyImplDecl *propImpl,
2276 llvm::Constant *AtomicHelperFn);
2277
2278 //===--------------------------------------------------------------------===//
2279 // Block Bits
2280 //===--------------------------------------------------------------------===//
2281
2282 /// Emit block literal.
2283 /// \return an LLVM value which is a pointer to a struct which contains
2284 /// information about the block, including the block invoke function, the
2285 /// captured variables, etc.
2286 llvm::Value *EmitBlockLiteral(const BlockExpr *);
2287
2289 const CGBlockInfo &Info,
2290 const DeclMapTy &ldm,
2291 bool IsLambdaConversionToBlock,
2292 bool BuildGlobalBlock);
2293
2294 /// Check if \p T is a C++ class that has a destructor that can throw.
2296
2297 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
2298 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
2300 const ObjCPropertyImplDecl *PID);
2302 const ObjCPropertyImplDecl *PID);
2303 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
2304
2305 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
2306 bool CanThrow);
2307
2308 class AutoVarEmission;
2309
2311
2312 /// Enter a cleanup to destroy a __block variable. Note that this
2313 /// cleanup should be a no-op if the variable hasn't left the stack
2314 /// yet; if a cleanup is required for the variable itself, that needs
2315 /// to be done externally.
2316 ///
2317 /// \param Kind Cleanup kind.
2318 ///
2319 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2320 /// structure that will be passed to _Block_object_dispose. When
2321 /// \p LoadBlockVarAddr is true, the address of the field of the block
2322 /// structure that holds the address of the __block structure.
2323 ///
2324 /// \param Flags The flag that will be passed to _Block_object_dispose.
2325 ///
2326 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2327 /// \p Addr to get the address of the __block structure.
2329 bool LoadBlockVarAddr, bool CanThrow);
2330
2331 void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
2332 llvm::Value *ptr);
2333
2336
2337 /// BuildBlockByrefAddress - Computes the location of the
2338 /// data in a variable which is declared as __block.
2340 bool followForward = true);
2342 const BlockByrefInfo &info,
2343 bool followForward,
2344 const llvm::Twine &name);
2345
2347
2349
2350 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
2351 const CGFunctionInfo &FnInfo);
2352
2353 /// Annotate the function with an attribute that disables TSan checking at
2354 /// runtime.
2355 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
2356
2357 /// Emit code for the start of a function.
2358 /// \param Loc The location to be associated with the function.
2359 /// \param StartLoc The location of the function body.
2361 QualType RetTy,
2362 llvm::Function *Fn,
2363 const CGFunctionInfo &FnInfo,
2364 const FunctionArgList &Args,
2366 SourceLocation StartLoc = SourceLocation());
2367
2369
2373 void EmitFunctionBody(const Stmt *Body);
2374 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
2375
2376 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
2377 CallArgList &CallArgs,
2378 const CGFunctionInfo *CallOpFnInfo = nullptr,
2379 llvm::Constant *CallOpFn = nullptr);
2383 CallArgList &CallArgs);
2385 const CGFunctionInfo **ImplFnInfo,
2386 llvm::Function **ImplFn);
2389 EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2390 }
2391 void EmitAsanPrologueOrEpilogue(bool Prologue);
2392
2393 /// Emit the unified return block, trying to avoid its emission when
2394 /// possible.
2395 /// \return The debug location of the user written return statement if the
2396 /// return block is avoided.
2397 llvm::DebugLoc EmitReturnBlock();
2398
2399 /// FinishFunction - Complete IR generation of the current function. It is
2400 /// legal to call this function even if there is no current insertion point.
2402
2403 void StartThunk(llvm::Function *Fn, GlobalDecl GD,
2404 const CGFunctionInfo &FnInfo, bool IsUnprototyped);
2405
2406 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
2407 const ThunkInfo *Thunk, bool IsUnprototyped);
2408
2410
2411 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2412 void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
2413 llvm::FunctionCallee Callee);
2414
2415 /// Generate a thunk for the given method.
2416 void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
2417 GlobalDecl GD, const ThunkInfo &Thunk,
2418 bool IsUnprototyped);
2419
2420 llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
2421 const CGFunctionInfo &FnInfo,
2422 GlobalDecl GD, const ThunkInfo &Thunk);
2423
2425 FunctionArgList &Args);
2426
2428
2429 /// Struct with all information about dynamic [sub]class needed to set vptr.
2430 struct VPtr {
2435 };
2436
2437 /// Initialize the vtable pointer of the given subobject.
2439
2441
2444
2446 CharUnits OffsetFromNearestVBase,
2447 bool BaseIsNonVirtualPrimaryBase,
2448 const CXXRecordDecl *VTableClass,
2449 VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);
2450
2452
2453 // VTableTrapMode - whether we guarantee that loading the
2454 // vtable is guaranteed to trap on authentication failure,
2455 // even if the resulting vtable pointer is unused.
2456 enum class VTableAuthMode {
2458 MustTrap,
2459 UnsafeUbsanStrip // Should only be used for Vptr UBSan check
2460 };
2461 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2462 /// to by This.
2463 llvm::Value *
2464 GetVTablePtr(Address This, llvm::Type *VTableTy,
2465 const CXXRecordDecl *VTableClass,
2467
2476 };
2477
2478 /// Derived is the presumed address of an object of type T after a
2479 /// cast. If T is a polymorphic class type, emit a check that the virtual
2480 /// table for Derived belongs to a class derived from T.
2481 void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
2483
2484 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2485 /// If vptr CFI is enabled, emit a check that VTable is valid.
2486 void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
2488
2489 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2490 /// RD using llvm.type.test.
2491 void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
2493
2494 /// If whole-program virtual table optimization is enabled, emit an assumption
2495 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2496 /// enabled, emit a check that VTable is a member of RD's type identifier.
2498 llvm::Value *VTable, SourceLocation Loc);
2499
2500 /// Returns whether we should perform a type checked load when loading a
2501 /// virtual function for virtual calls to members of RD. This is generally
2502 /// true when both vcall CFI and whole-program-vtables are enabled.
2504
2505 /// Emit a type checked load from the given vtable.
2507 llvm::Value *VTable,
2508 llvm::Type *VTableTy,
2509 uint64_t VTableByteOffset);
2510
2511 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2512 /// given phase of destruction for a destructor. The end result
2513 /// should call destructors on members and base classes in reverse
2514 /// order of their construction.
2516
2517 /// ShouldInstrumentFunction - Return true if the current function should be
2518 /// instrumented with __cyg_profile_func_* calls
2520
2521 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2522 /// should not be instrumented with sanitizers.
2524
2525 /// ShouldXRayInstrument - Return true if the current function should be
2526 /// instrumented with XRay nop sleds.
2528
2529 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2530 /// XRay custom event handling calls.
2532
2533 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2534 /// XRay typed event handling calls.
2536
2537 /// Return a type hash constant for a function instrumented by
2538 /// -fsanitize=function.
2539 llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
2540
2541 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2542 /// arguments for the given function. This is also responsible for naming the
2543 /// LLVM function arguments.
2545 llvm::Function *Fn,
2546 const FunctionArgList &Args);
2547
2548 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2549 /// given temporary.
2550 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
2551 SourceLocation EndLoc);
2552
2553 /// Emit a test that checks if the return value \p RV is nonnull.
2554 void EmitReturnValueCheck(llvm::Value *RV);
2555
2556 /// EmitStartEHSpec - Emit the start of the exception spec.
2557 void EmitStartEHSpec(const Decl *D);
2558
2559 /// EmitEndEHSpec - Emit the end of the exception spec.
2560 void EmitEndEHSpec(const Decl *D);
2561
2562 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2563 llvm::BasicBlock *getTerminateLandingPad();
2564
2565 /// getTerminateLandingPad - Return a cleanup funclet that just calls
2566 /// terminate.
2567 llvm::BasicBlock *getTerminateFunclet();
2568
2569 /// getTerminateHandler - Return a handler (not a landing pad, just
2570 /// a catch handler) that just calls terminate. This is used when
2571 /// a terminate scope encloses a try.
2572 llvm::BasicBlock *getTerminateHandler();
2573
2575 llvm::Type *ConvertType(QualType T);
2577 llvm::Type *LLVMTy = nullptr);
2578 llvm::Type *ConvertType(const TypeDecl *T) {
2579 return ConvertType(getContext().getTypeDeclType(T));
2580 }
2581
2582 /// LoadObjCSelf - Load the value of self. This function is only valid while
2583 /// generating code for an Objective-C method.
2584 llvm::Value *LoadObjCSelf();
2585
2586 /// TypeOfSelfObject - Return type of object that this self represents.
2588
2589 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2591
2593 return getEvaluationKind(T) == TEK_Scalar;
2594 }
2595
2598 }
2599
2600 /// createBasicBlock - Create an LLVM basic block.
2601 llvm::BasicBlock *createBasicBlock(const Twine &name = "",
2602 llvm::Function *parent = nullptr,
2603 llvm::BasicBlock *before = nullptr) {
2604 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
2605 }
2606
2607 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2608 /// label maps to.
2610
2611 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2612 /// another basic block, simplify it. This assumes that no other code could
2613 /// potentially reference the basic block.
2614 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
2615
2616 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2617 /// adding a fall-through branch from the current insert block if
2618 /// necessary. It is legal to call this function even if there is no current
2619 /// insertion point.
2620 ///
2621 /// IsFinished - If true, indicates that the caller has finished emitting
2622 /// branches to the given block and does not expect to emit code into it. This
2623 /// means the block can be ignored if it is unreachable.
2624 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
2625
2626 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2627 /// near its uses, and leave the insertion point in it.
2628 void EmitBlockAfterUses(llvm::BasicBlock *BB);
2629
2630 /// EmitBranch - Emit a branch to the specified basic block from the current
2631 /// insert block, taking care to avoid creation of branches from dummy
2632 /// blocks. It is legal to call this function even if there is no current
2633 /// insertion point.
2634 ///
2635 /// This function clears the current insertion point. The caller should follow
2636 /// calls to this function with calls to Emit*Block prior to generation new
2637 /// code.
2638 void EmitBranch(llvm::BasicBlock *Block);
2639
2640 /// HaveInsertPoint - True if an insertion point is defined. If not, this
2641 /// indicates that the current code being emitted is unreachable.
2642 bool HaveInsertPoint() const {
2643 return Builder.GetInsertBlock() != nullptr;
2644 }
2645
2646 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2647 /// emitted IR has a place to go. Note that by definition, if this function
2648 /// creates a block then that block is unreachable; callers may do better to
2649 /// detect when no insertion point is defined and simply skip IR generation.
2651 if (!HaveInsertPoint())
2653 }
2654
2655 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2656 /// specified stmt yet.
2657 void ErrorUnsupported(const Stmt *S, const char *Type);
2658
2659 //===--------------------------------------------------------------------===//
2660 // Helpers
2661 //===--------------------------------------------------------------------===//
2662
2664 llvm::BasicBlock *LHSBlock,
2665 llvm::BasicBlock *RHSBlock,
2666 llvm::BasicBlock *MergeBlock,
2667 QualType MergedType) {
2668 Builder.SetInsertPoint(MergeBlock);
2669 llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
2670 PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
2671 PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
2672 LHS.replaceBasePointer(PtrPhi);
2673 LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
2674 return LHS;
2675 }
2676
2677 /// Construct an address with the natural alignment of T. If a pointer to T
2678 /// is expected to be signed, the pointer passed to this function must have
2679 /// been signed, and the returned Address will have the pointer authentication
2680 /// information needed to authenticate the signed pointer.
2682 llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
2683 bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
2684 TBAAAccessInfo *TBAAInfo = nullptr,
2685 KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
2686 if (Alignment.isZero())
2687 Alignment =
2688 CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
2689 return Address(Ptr, ConvertTypeForMem(T), Alignment,
2690 CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
2691 IsKnownNonNull);
2692 }
2693
2696 return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
2698 }
2699
2701 TBAAAccessInfo TBAAInfo) {
2702 return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
2703 }
2704
2705 LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2707 return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
2709 }
2710
2711 /// Same as MakeAddrLValue above except that the pointer is known to be
2712 /// unsigned.
2713 LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
2715 Address Addr(V, ConvertTypeForMem(T), Alignment);
2716 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2718 }
2719
2720 LValue
2723 return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
2724 TBAAAccessInfo());
2725 }
2726
2727 /// Given a value of type T* that may not be to a complete object, construct
2728 /// an l-value with the natural pointee alignment of T.
2730
2731 LValue
2733 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
2734
2735 /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
2736 /// to be unsigned.
2738
2740
2742 LValueBaseInfo *PointeeBaseInfo = nullptr,
2743 TBAAAccessInfo *PointeeTBAAInfo = nullptr);
2746 AlignmentSource Source =
2748 LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
2749 CGM.getTBAAAccessInfo(RefTy));
2750 return EmitLoadOfReferenceLValue(RefLVal);
2751 }
2752
2753 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2754 /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
2755 /// it is loaded from.
2757 LValueBaseInfo *BaseInfo = nullptr,
2758 TBAAAccessInfo *TBAAInfo = nullptr);
2760
2761private:
2762 struct AllocaTracker {
2763 void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
2764 llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
2765
2766 private:
2768 };
2769 AllocaTracker *Allocas = nullptr;
2770
2771public:
2772 // Captures all the allocas created during the scope of its RAII object.
2775 : CGF(CGF), OldTracker(CGF.Allocas) {
2776 CGF.Allocas = &Tracker;
2777 }
2778 ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
2779
2780 llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
2781
2782 private:
2783 CodeGenFunction &CGF;
2784 AllocaTracker *OldTracker;
2785 AllocaTracker Tracker;
2786 };
2787
2788 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2789 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2790 /// insertion point of the builder. The caller is responsible for setting an
2791 /// appropriate alignment on
2792 /// the alloca.
2793 ///
2794 /// \p ArraySize is the number of array elements to be allocated if it
2795 /// is not nullptr.
2796 ///
2797 /// LangAS::Default is the address space of pointers to local variables and
2798 /// temporaries, as exposed in the source language. In certain
2799 /// configurations, this is not the same as the alloca address space, and a
2800 /// cast is needed to lift the pointer from the alloca AS into
2801 /// LangAS::Default. This can happen when the target uses a restricted
2802 /// address space for the stack but the source language requires
2803 /// LangAS::Default to be a generic address space. The latter condition is
2804 /// common for most programming languages; OpenCL is an exception in that
2805 /// LangAS::Default is the private address space, which naturally maps
2806 /// to the stack.
2807 ///
2808 /// Because the address of a temporary is often exposed to the program in
2809 /// various ways, this function will perform the cast. The original alloca
2810 /// instruction is returned through \p Alloca if it is not nullptr.
2811 ///
2812 /// The cast is not performaed in CreateTempAllocaWithoutCast. This is
2813 /// more efficient if the caller knows that the address will not be exposed.
2814 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
2815 llvm::Value *ArraySize = nullptr);
2817 const Twine &Name = "tmp",
2818 llvm::Value *ArraySize = nullptr,
2819 RawAddress *Alloca = nullptr);
2821 const Twine &Name = "tmp",
2822 llvm::Value *ArraySize = nullptr);
2823
2824 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2825 /// default ABI alignment of the given LLVM type.
2826 ///
2827 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2828 /// any given AST type that happens to have been lowered to the
2829 /// given IR type. This should only ever be used for function-local,
2830 /// IR-driven manipulations like saving and restoring a value. Do
2831 /// not hand this address off to arbitrary IRGen routines, and especially
2832 /// do not pass it as an argument to a function that might expect a
2833 /// properly ABI-aligned value.
2835 const Twine &Name = "tmp");
2836
2837 /// CreateIRTemp - Create a temporary IR object of the given type, with
2838 /// appropriate alignment. This routine should only be used when an temporary
2839 /// value needs to be stored into an alloca (for example, to avoid explicit
2840 /// PHI construction), but the type is the IR type, not the type appropriate
2841 /// for storing in memory.
2842 ///
2843 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2844 /// ConvertType instead of ConvertTypeForMem.
2845 RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
2846
2847 /// CreateMemTemp - Create a temporary memory object of the given type, with
2848 /// appropriate alignmen and cast it to the default address space. Returns
2849 /// the original alloca instruction by \p Alloca if it is not nullptr.
2850 RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
2851 RawAddress *Alloca = nullptr);
2853 const Twine &Name = "tmp",
2854 RawAddress *Alloca = nullptr);
2855
2856 /// CreateMemTemp - Create a temporary memory object of the given type, with
2857 /// appropriate alignmen without casting it to the default address space.
2858 RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
2860 const Twine &Name = "tmp");
2861
2862 /// CreateAggTemp - Create a temporary memory object for the given
2863 /// aggregate type.
2864 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
2865 RawAddress *Alloca = nullptr) {
2866 return AggValueSlot::forAddr(
2867 CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
2870 }
2871
2872 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2873 /// expression and compare the result against zero, returning an Int1Ty value.
2874 llvm::Value *EvaluateExprAsBool(const Expr *E);
2875
2876 /// Retrieve the implicit cast expression of the rhs in a binary operator
2877 /// expression by passing pointers to Value and QualType
2878 /// This is used for implicit bitfield conversion checks, which
2879 /// must compare with the value before potential truncation.
2881 llvm::Value **Previous,
2882 QualType *SrcType);
2883
2884 /// Emit a check that an [implicit] conversion of a bitfield. It is not UB,
2885 /// so we use the value after conversion.
2886 void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
2887 llvm::Value *Dst, QualType DstType,
2888 const CGBitFieldInfo &Info,
2890
2891 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2892 void EmitIgnoredExpr(const Expr *E);
2893
2894 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2895 /// any type. The result is returned as an RValue struct. If this is an
2896 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2897 /// the result should be returned.
2898 ///
2899 /// \param ignoreResult True if the resulting value isn't used.
2902 bool ignoreResult = false);
2903
2904 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2905 // or the value of the expression, depending on how va_list is defined.
2907
2908 /// Emit a "reference" to a __builtin_ms_va_list; this is
2909 /// always the value of the expression, because a __builtin_ms_va_list is a
2910 /// pointer to a char.
2912
2913 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2914 /// always be accessible even if no aggregate location is provided.
2916
2917 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2918 /// arbitrary expression into the given memory location.
2919 void EmitAnyExprToMem(const Expr *E, Address Location,
2920 Qualifiers Quals, bool IsInitializer);
2921
2922 void EmitAnyExprToExn(const Expr *E, Address Addr);
2923
2924 /// EmitInitializationToLValue - Emit an initializer to an LValue.
2926 const Expr *E, LValue LV,
2928
2929 /// EmitExprAsInit - Emits the code necessary to initialize a
2930 /// location in memory with the given initializer.
2931 void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
2932 bool capturedByInit);
2933
2934 /// hasVolatileMember - returns true if aggregate type has a volatile
2935 /// member.
2937 if (const RecordType *RT = T->getAs<RecordType>()) {
2938 const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
2939 return RD->hasVolatileMember();
2940 }
2941 return false;
2942 }
2943
2944 /// Determine whether a return value slot may overlap some other object.
2946 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2947 // class subobjects. These cases may need to be revisited depending on the
2948 // resolution of the relevant core issue.
2950 }
2951
2952 /// Determine whether a field initialization may overlap some other object.
2954
2955 /// Determine whether a base class initialization may overlap some other
2956 /// object.
2958 const CXXRecordDecl *BaseRD,
2959 bool IsVirtual);
2960
2961 /// Emit an aggregate assignment.
2963 bool IsVolatile = hasVolatileMember(EltTy);
2964 EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
2965 }
2966
2968 AggValueSlot::Overlap_t MayOverlap) {
2969 EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
2970 }
2971
2972 /// EmitAggregateCopy - Emit an aggregate copy.
2973 ///
2974 /// \param isVolatile \c true iff either the source or the destination is
2975 /// volatile.
2976 /// \param MayOverlap Whether the tail padding of the destination might be
2977 /// occupied by some other object. More efficient code can often be
2978 /// generated if not.
2980 AggValueSlot::Overlap_t MayOverlap,
2981 bool isVolatile = false);
2982
2983 /// GetAddrOfLocalVar - Return the address of a local variable.
2985 auto it = LocalDeclMap.find(VD);
2986 assert(it != LocalDeclMap.end() &&
2987 "Invalid argument to GetAddrOfLocalVar(), no decl!");
2988 return it->second;
2989 }
2990
2991 /// Given an opaque value expression, return its LValue mapping if it exists,
2992 /// otherwise create one.
2994
2995 /// Given an opaque value expression, return its RValue mapping if it exists,
2996 /// otherwise create one.
2998
2999 /// Get the index of the current ArrayInitLoopExpr, if any.
3000 llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
3001
3002 /// getAccessedFieldNo - Given an encoded value and a result number, return
3003 /// the input field number being accessed.
3004 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
3005
3006 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
3007 llvm::BasicBlock *GetIndirectGotoBlock();
3008
3009 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3010 static bool IsWrappedCXXThis(const Expr *E);
3011
3012 /// EmitNullInitialization - Generate code to set a value of the given type to
3013 /// null, If the type contains data member pointers, they will be initialized
3014 /// to -1 in accordance with the Itanium C++ ABI.
3016
3017 /// Emits a call to an LLVM variable-argument intrinsic, either
3018 /// \c llvm.va_start or \c llvm.va_end.
3019 /// \param ArgValue A reference to the \c va_list as emitted by either
3020 /// \c EmitVAListRef or \c EmitMSVAListRef.
3021 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3022 /// calls \c llvm.va_end.
3023 llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart);
3024
3025 /// Generate code to get an argument from the passed in pointer
3026 /// and update it accordingly.
3027 /// \param VE The \c VAArgExpr for which to generate code.
3028 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3029 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3030 /// \returns A pointer to the argument.
3031 // FIXME: We should be able to get rid of this method and use the va_arg
3032 // instruction in LLVM instead once it works well enough.
3035
3036 /// emitArrayLength - Compute the length of an array, even if it's a
3037 /// VLA, and drill down to the base element type.
3039 QualType &baseType,
3040 Address &addr);
3041
3042 /// EmitVLASize - Capture all the sizes for the VLA expressions in
3043 /// the given variably-modified type and store them in the VLASizeMap.
3044 ///
3045 /// This function can be called with a null (unreachable) insert point.
3047
3049 llvm::Value *NumElts;
3051
3052 VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
3053 };
3054
3055 /// Return the number of elements for a single dimension
3056 /// for the given array type.
3059
3060 /// Returns an LLVM value that corresponds to the size,
3061 /// in non-variably-sized elements, of a variable length array type,
3062 /// plus that largest non-variably-sized element type. Assumes that
3063 /// the type has already been emitted with EmitVariablyModifiedType.
3066
3067 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
3068 /// generating code for an C++ member function.
3069 llvm::Value *LoadCXXThis() {
3070 assert(CXXThisValue && "no 'this' value for this function");
3071 return CXXThisValue;
3072 }
3074
3075 /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
3076 /// virtual bases.
3077 // FIXME: Every place that calls LoadCXXVTT is something
3078 // that needs to be abstracted properly.
3079 llvm::Value *LoadCXXVTT() {
3080 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
3081 return CXXStructorImplicitParamValue;
3082 }
3083
3084 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
3085 /// complete class to the given direct base.
3086 Address
3088 const CXXRecordDecl *Derived,
3089 const CXXRecordDecl *Base,
3090 bool BaseIsVirtual);
3091
3092 static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
3093
3094 /// GetAddressOfBaseClass - This function will add the necessary delta to the
3095 /// load of 'this' and returns address of the base class.
3097 const CXXRecordDecl *Derived,
3100 bool NullCheckValue, SourceLocation Loc);
3101
3103 const CXXRecordDecl *Derived,
3106 bool NullCheckValue);
3107
3108 /// GetVTTParameter - Return the VTT parameter that should be passed to a
3109 /// base constructor/destructor with virtual bases.
3110 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
3111 /// to ItaniumCXXABI.cpp together with all the references to VTT.
3112 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
3113 bool Delegating);
3114
3116 CXXCtorType CtorType,
3117 const FunctionArgList &Args,
3119 // It's important not to confuse this and the previous function. Delegating
3120 // constructors are the C++0x feature. The constructor delegate optimization
3121 // is used to reduce duplication in the base and complete consturctors where
3122 // they are substantially the same.
3124 const FunctionArgList &Args);
3125
3126 /// Emit a call to an inheriting constructor (that is, one that invokes a
3127 /// constructor inherited from a base class) by inlining its definition. This
3128 /// is necessary if the ABI does not support forwarding the arguments to the
3129 /// base class constructor (because they're variadic or similar).
3131 CXXCtorType CtorType,
3132 bool ForVirtualBase,
3133 bool Delegating,
3134 CallArgList &Args);
3135
3136 /// Emit a call to a constructor inherited from a base class, passing the
3137 /// current constructor's arguments along unmodified (without even making
3138 /// a copy).
3140 bool ForVirtualBase, Address This,
3141 bool InheritedFromVBase,
3143
3145 bool ForVirtualBase, bool Delegating,
3146 AggValueSlot ThisAVS, const CXXConstructExpr *E);
3147
3149 bool ForVirtualBase, bool Delegating,
3150 Address This, CallArgList &Args,
3152 SourceLocation Loc, bool NewPointerIsChecked,
3153 llvm::CallBase **CallOrInvoke = nullptr);
3154
3155 /// Emit assumption load for all bases. Requires to be called only on
3156 /// most-derived class and not under construction of the object.
3158
3159 /// Emit assumption that vptr load == global vtable.
3160 void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);
3161
3163 Address This, Address Src,
3164 const CXXConstructExpr *E);
3165
3167 const ArrayType *ArrayTy,
3168 Address ArrayPtr,
3169 const CXXConstructExpr *E,
3170 bool NewPointerIsChecked,
3171 bool ZeroInitialization = false);
3172
3174 llvm::Value *NumElements,
3175 Address ArrayPtr,
3176 const CXXConstructExpr *E,
3177 bool NewPointerIsChecked,
3178 bool ZeroInitialization = false);
3179
3181
3183 bool ForVirtualBase, bool Delegating, Address This,
3184 QualType ThisTy);
3185
3187 llvm::Type *ElementTy, Address NewPtr,
3188 llvm::Value *NumElements,
3189 llvm::Value *AllocSizeWithoutCookie);
3190
3191 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
3192 Address Ptr);
3193
3198
3199 llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
3200 void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
3201
3202 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
3204
3205 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
3206 QualType DeleteTy, llvm::Value *NumElements = nullptr,
3207 CharUnits CookieSize = CharUnits());
3208
3210 const CallExpr *TheCallExpr, bool IsDelete);
3211
3212 llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
3213 llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
3215
3216 /// Situations in which we might emit a check for the suitability of a
3217 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
3218 /// compiler-rt.
3220 /// Checking the operand of a load. Must be suitably sized and aligned.
3222 /// Checking the destination of a store. Must be suitably sized and aligned.
3224 /// Checking the bound value in a reference binding. Must be suitably sized
3225 /// and aligned, but is not required to refer to an object (until the
3226 /// reference is used), per core issue 453.
3228 /// Checking the object expression in a non-static data member access. Must
3229 /// be an object within its lifetime.
3231 /// Checking the 'this' pointer for a call to a non-static member function.
3232 /// Must be an object within its lifetime.
3234 /// Checking the 'this' pointer for a constructor call.
3236 /// Checking the operand of a static_cast to a derived pointer type. Must be
3237 /// null or an object within its lifetime.
3239 /// Checking the operand of a static_cast to a derived reference type. Must
3240 /// be an object within its lifetime.
3242 /// Checking the operand of a cast to a base object. Must be suitably sized
3243 /// and aligned.
3245 /// Checking the operand of a cast to a virtual base object. Must be an
3246 /// object within its lifetime.
3248 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
3250 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
3251 /// null or an object within its lifetime.
3254
3255 /// Determine whether the pointer type check \p TCK permits null pointers.
3257
3258 /// Determine whether the pointer type check \p TCK requires a vptr check.
3260
3261 /// Whether any type-checking sanitizers are enabled. If \c false,
3262 /// calls to EmitTypeCheck can be skipped.
3264
3266 QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
3267 llvm::Value *ArraySize = nullptr) {
3269 return;
3270 EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
3271 SkippedChecks, ArraySize);
3272 }
3273
3275 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3276 SanitizerSet SkippedChecks = SanitizerSet(),
3277 llvm::Value *ArraySize = nullptr) {
3279 return;
3280 EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
3281 SkippedChecks, ArraySize);
3282 }
3283
3284 /// Emit a check that \p V is the address of storage of the
3285 /// appropriate size and alignment for an object of type \p Type
3286 /// (or if ArraySize is provided, for an array of that bound).
3288 QualType Type, CharUnits Alignment = CharUnits::Zero(),
3289 SanitizerSet SkippedChecks = SanitizerSet(),
3290 llvm::Value *ArraySize = nullptr);
3291
3292 /// Emit a check that \p Base points into an array object, which
3293 /// we can access at index \p Index. \p Accessed should be \c false if we
3294 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3295 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
3296 QualType IndexType, bool Accessed);
3297 void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
3298 llvm::Value *Index, QualType IndexType,
3299 QualType IndexedType, bool Accessed);
3300
3301 // Find a struct's flexible array member and get its offset. It may be
3302 // embedded inside multiple sub-structs, but must still be the last field.
3303 const FieldDecl *
3305 const FieldDecl *FAMDecl,
3306 uint64_t &Offset);
3307
3309 const FieldDecl *FAMDecl,
3310 const FieldDecl *CountDecl);
3311
3312 /// Build an expression accessing the "counted_by" field.
3314 const FieldDecl *FAMDecl,
3315 const FieldDecl *CountDecl);
3316
3318 bool isInc, bool isPre);
3320 bool isInc, bool isPre);
3321
3322 /// Converts Location to a DebugLoc, if debug information is enabled.
3323 llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
3324
3325 /// Get the record field index as represented in debug info.
3326 unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
3327
3328
3329 //===--------------------------------------------------------------------===//
3330 // Declaration Emission
3331 //===--------------------------------------------------------------------===//
3332
3333 /// EmitDecl - Emit a declaration.
3334 ///
3335 /// This function can be called with a null (unreachable) insert point.
3336 void EmitDecl(const Decl &D);
3337
3338 /// EmitVarDecl - Emit a local variable declaration.
3339 ///
3340 /// This function can be called with a null (unreachable) insert point.
3341 void EmitVarDecl(const VarDecl &D);
3342
3343 void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
3344 bool capturedByInit);
3345
3347 llvm::Value *Address);
3348
3349 /// Determine whether the given initializer is trivial in the sense
3350 /// that it requires no code to be generated.
3352
3353 /// EmitAutoVarDecl - Emit an auto variable declaration.
3354 ///
3355 /// This function can be called with a null (unreachable) insert point.
3357
3359 friend class CodeGenFunction;
3360
3361 const VarDecl *Variable;
3362
3363 /// The address of the alloca for languages with explicit address space
3364 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3365 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3366 /// as a global constant.
3367 Address Addr;
3368
3369 llvm::Value *NRVOFlag;
3370
3371 /// True if the variable is a __block variable that is captured by an
3372 /// escaping block.
3373 bool IsEscapingByRef;
3374
3375 /// True if the variable is of aggregate type and has a constant
3376 /// initializer.
3377 bool IsConstantAggregate;
3378
3379 /// Non-null if we should use lifetime annotations.
3380 llvm::Value *SizeForLifetimeMarkers;
3381
3382 /// Address with original alloca instruction. Invalid if the variable was
3383 /// emitted as a global constant.
3384 RawAddress AllocaAddr;
3385
3386 struct Invalid {};
3387 AutoVarEmission(Invalid)
3388 : Variable(nullptr), Addr(Address::invalid()),
3389 AllocaAddr(RawAddress::invalid()) {}
3390
3391 AutoVarEmission(const VarDecl &variable)
3392 : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
3393 IsEscapingByRef(false), IsConstantAggregate(false),
3394 SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
3395
3396 bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
3397
3398 public:
3399 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
3400
3401 bool useLifetimeMarkers() const {
3402 return SizeForLifetimeMarkers != nullptr;
3403 }
3404 llvm::Value *getSizeForLifetimeMarkers() const {
3405 assert(useLifetimeMarkers());
3406 return SizeForLifetimeMarkers;
3407 }
3408
3409 /// Returns the raw, allocated address, which is not necessarily
3410 /// the address of the object itself. It is casted to default
3411 /// address space for address space agnostic languages.
3413 return Addr;
3414 }
3415
3416 /// Returns the address for the original alloca instruction.
3417 RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
3418
3419 /// Returns the address of the object within this declaration.
3420 /// Note that this does not chase the forwarding pointer for
3421 /// __block decls.
3423 if (!IsEscapingByRef) return Addr;
3424
3425 return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
3426 }
3427 };
3429 void EmitAutoVarInit(const AutoVarEmission &emission);
3432 QualType::DestructionKind dtorKind);
3433
3434 /// Emits the alloca and debug information for the size expressions for each
3435 /// dimension of an array. It registers the association of its (1-dimensional)
3436 /// QualTypes and size expression's debug node, so that CGDebugInfo can
3437 /// reference this node when creating the DISubrange object to describe the
3438 /// array types.
3440 const VarDecl &D,
3441 bool EmitDebugInfo);
3442
3444 llvm::GlobalValue::LinkageTypes Linkage);
3445
3447 union {
3449 llvm::Value *Value;
3450 };
3451
3452 bool IsIndirect;
3453
3454 ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
3455 ParamValue(Address A) : Addr(A), IsIndirect(true) {}
3456
3457 public:
3458 static ParamValue forDirect(llvm::Value *value) {
3459 return ParamValue(value);
3460 }
3462 assert(!addr.getAlignment().isZero());
3463 return ParamValue(addr);
3464 }
3465
3466 bool isIndirect() const { return IsIndirect; }
3467 llvm::Value *getAnyValue() const {
3468 if (!isIndirect())
3469 return Value;
3470 assert(!Addr.hasOffset() && "unexpected offset");
3471 return Addr.getBasePointer();
3472 }
3473
3474 llvm::Value *getDirectValue() const {
3475 assert(!isIndirect());
3476 return Value;
3477 }
3478
3480 assert(isIndirect());
3481 return Addr;
3482 }
3483 };
3484
3485 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3486 void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
3487
3488 /// protectFromPeepholes - Protect a value that we're intending to
3489 /// store to the side, but which will probably be used later, from
3490 /// aggressive peepholing optimizations that might delete it.
3491 ///
3492 /// Pass the result to unprotectFromPeepholes to declare that
3493 /// protection is no longer required.
3494 ///
3495 /// There's no particular reason why this shouldn't apply to
3496 /// l-values, it's just that no existing peepholes work on pointers.
3499
3500 void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
3502 SourceLocation AssumptionLoc,
3503 llvm::Value *Alignment,
3504 llvm::Value *OffsetValue,
3505 llvm::Value *TheCheck,
3506 llvm::Instruction *Assumption);
3507
3508 void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
3509 SourceLocation Loc, SourceLocation AssumptionLoc,
3510 llvm::Value *Alignment,
3511 llvm::Value *OffsetValue = nullptr);
3512
3513 void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
3514 SourceLocation AssumptionLoc,
3515 llvm::Value *Alignment,
3516 llvm::Value *OffsetValue = nullptr);
3517
3518 //===--------------------------------------------------------------------===//
3519 // Statement Emission
3520 //===--------------------------------------------------------------------===//
3521
3522 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3523 void EmitStopPoint(const Stmt *S);
3524
3525 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3526 /// this function even if there is no current insertion point.
3527 ///
3528 /// This function may clear the current insertion point; callers should use
3529 /// EnsureInsertPoint if they wish to subsequently generate code without first
3530 /// calling EmitBlock, EmitBranch, or EmitStmt.
3531 void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = {});
3532
3533 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3534 /// necessarily require an insertion point or debug information; typically
3535 /// because the statement amounts to a jump or a container of other
3536 /// statements.
3537 ///
3538 /// \return True if the statement was handled.
3540
3541 Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
3544 bool GetLast = false,
3545 AggValueSlot AVS =
3547
3548 /// EmitLabel - Emit the block for the given label. It is legal to call this
3549 /// function even if there is no current insertion point.
3550 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
3551
3552 void EmitLabelStmt(const LabelStmt &S);
3554 void EmitGotoStmt(const GotoStmt &S);
3556 void EmitIfStmt(const IfStmt &S);
3557
3559 void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = {});
3560 void EmitForStmt(const ForStmt &S, ArrayRef<const Attr *> Attrs = {});
3562 void EmitDeclStmt(const DeclStmt &S);
3563 void EmitBreakStmt(const BreakStmt &S);
3569 void EmitAsmStmt(const AsmStmt &S);
3570
3576
3581 bool ignoreResult = false);
3585 bool ignoreResult = false);
3587 RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
3588
3589 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3590 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
3591
3597 void VolatilizeTryBlocks(llvm::BasicBlock *BB,
3599
3601 llvm::Function *FinallyFunc);
3603 const Stmt *OutlinedStmt);
3604
3606 const SEHExceptStmt &Except);
3607
3609 const SEHFinallyStmt &Finally);
3610
3612 llvm::Value *ParentFP,
3613 llvm::Value *EntryEBP);
3614 llvm::Value *EmitSEHExceptionCode();
3615 llvm::Value *EmitSEHExceptionInfo();
3617
3618 /// Emit simple code for OpenMP directives in Simd-only mode.
3620
3621 /// Scan the outlined statement for captures from the parent function. For
3622 /// each capture, mark the capture as escaped and emit a call to
3623 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3625 bool IsFilter);
3626
3627 /// Recovers the address of a local in a parent function. ParentVar is the
3628 /// address of the variable used in the immediate parent function. It can
3629 /// either be an alloca or a call to llvm.localrecover if there are nested
3630 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3631 /// frame.
3633 Address ParentVar,
3634 llvm::Value *ParentFP);
3635
3637 ArrayRef<const Attr *> Attrs = {});
3638
3639 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3641 CodeGenFunction &CGF;
3642
3643 public:
3645 bool HasCancel)
3646 : CGF(CGF) {
3647 CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
3648 }
3649 ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
3650 };
3651
3652 /// Returns calculated size of the specified type.
3653 llvm::Value *getTypeSize(QualType Ty);
3661 SmallVectorImpl<llvm::Value *> &CapturedVars);
3662 void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
3664 /// Perform element by element copying of arrays with type \a
3665 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3666 /// generated by \a CopyGen.
3667 ///
3668 /// \param DestAddr Address of the destination array.
3669 /// \param SrcAddr Address of the source array.
3670 /// \param OriginalType Type of destination and source arrays.
3671 /// \param CopyGen Copying procedure that copies value of single array element
3672 /// to another single array element.
3674 Address DestAddr, Address SrcAddr, QualType OriginalType,
3675 const llvm::function_ref<void(Address, Address)> CopyGen);
3676 /// Emit proper copying of data from one variable to another.
3677 ///
3678 /// \param OriginalType Original type of the copied variables.
3679 /// \param DestAddr Destination address.
3680 /// \param SrcAddr Source address.
3681 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3682 /// type of the base array element).
3683 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3684 /// the base array element).
3685 /// \param Copy Actual copygin expression for copying data from \a SrcVD to \a
3686 /// DestVD.
3687 void EmitOMPCopy(QualType OriginalType,
3688 Address DestAddr, Address SrcAddr,
3689 const VarDecl *DestVD, const VarDecl *SrcVD,
3690 const Expr *Copy);
3691 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3692 /// \a X = \a E \a BO \a E.
3693 ///
3694 /// \param X Value to be updated.
3695 /// \param E Update value.
3696 /// \param BO Binary operation for update operation.
3697 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3698 /// expression, false otherwise.
3699 /// \param AO Atomic ordering of the generated atomic instructions.
3700 /// \param CommonGen Code generator for complex expressions that cannot be
3701 /// expressed through atomicrmw instruction.
3702 /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
3703 /// generated, <false, RValue::get(nullptr)> otherwise.
3704 std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
3705 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
3706 llvm::AtomicOrdering AO, SourceLocation Loc,
3707 const llvm::function_ref<RValue(RValue)> CommonGen);
3709 OMPPrivateScope &PrivateScope);
3711 OMPPrivateScope &PrivateScope);
3713 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
3714 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3715 CaptureDeviceAddrMap);
3717 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
3718 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
3719 CaptureDeviceAddrMap);
3720 /// Emit code for copyin clause in \a D directive. The next code is
3721 /// generated at the start of outlined functions for directives:
3722 /// \code
3723 /// threadprivate_var1 = master_threadprivate_var1;
3724 /// operator=(threadprivate_var2, master_threadprivate_var2);
3725 /// ...
3726 /// __kmpc_barrier(&loc, global_tid);
3727 /// \endcode
3728 ///
3729 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3730 /// \returns true if at least one copyin variable is found, false otherwise.
3732 /// Emit initial code for lastprivate variables. If some variable is
3733 /// not also firstprivate, then the default initialization is used. Otherwise
3734 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3735 /// method.
3736 ///
3737 /// \param D Directive that may have 'lastprivate' directives.
3738 /// \param PrivateScope Private scope for capturing lastprivate variables for
3739 /// proper codegen in internal captured statement.
3740 ///
3741 /// \returns true if there is at least one lastprivate variable, false
3742 /// otherwise.
3744 OMPPrivateScope &PrivateScope);
3745 /// Emit final copying of lastprivate values to original variables at
3746 /// the end of the worksharing or simd directive.
3747 ///
3748 /// \param D Directive that has at least one 'lastprivate' directives.
3749 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3750 /// it is the last iteration of the loop code in associated directive, or to
3751 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3753 bool NoFinals,
3754 llvm::Value *IsLastIterCond = nullptr);
3755 /// Emit initial code for linear clauses.
3757 CodeGenFunction::OMPPrivateScope &PrivateScope);
3758 /// Emit final code for linear clauses.
3759 /// \param CondGen Optional conditional code for final part of codegen for
3760 /// linear clause.
3762 const OMPLoopDirective &D,
3763 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
3764 /// Emit initial code for reduction variables. Creates reduction copies
3765 /// and initializes them with the values according to OpenMP standard.
3766 ///
3767 /// \param D Directive (possibly) with the 'reduction' clause.
3768 /// \param PrivateScope Private scope for capturing reduction variables for
3769 /// proper codegen in internal captured statement.
3770 ///
3772 OMPPrivateScope &PrivateScope,
3773 bool ForInscan = false);
3774 /// Emit final update of reduction values to original variables at
3775 /// the end of the directive.
3776 ///
3777 /// \param D Directive that has at least one 'reduction' directives.
3778 /// \param ReductionKind The kind of reduction to perform.
3780 const OpenMPDirectiveKind ReductionKind);
3781 /// Emit initial code for linear variables. Creates private copies
3782 /// and initializes them with the values according to OpenMP standard.
3783 ///
3784 /// \param D Directive (possibly) with the 'linear' clause.
3785 /// \return true if at least one linear variable is found that should be
3786 /// initialized with the value of the original variable, false otherwise.
3788
3789 typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
3790 llvm::Function * /*OutlinedFn*/,
3791 const OMPTaskDataTy & /*Data*/)>
3794 const OpenMPDirectiveKind CapturedRegion,
3795 const RegionCodeGenTy &BodyGen,
3796 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
3803 explicit OMPTargetDataInfo() = default;
3806 unsigned NumberOfTargetItems)
3810 };
3812 const RegionCodeGenTy &BodyGen,
3813 OMPTargetDataInfo &InputInfo);
3816 CodeGenFunction &CGF,
3817 const CapturedStmt *CS,
3856 void
3859 void
3866 void
3882 void
3907
3908 /// Emit device code for the target directive.
3910 StringRef ParentName,
3911 const OMPTargetDirective &S);
3912 static void
3915 /// Emit device code for the target parallel for directive.
3917 CodeGenModule &CGM, StringRef ParentName,
3919 /// Emit device code for the target parallel for simd directive.
3921 CodeGenModule &CGM, StringRef ParentName,
3923 /// Emit device code for the target teams directive.
3924 static void
3926 const OMPTargetTeamsDirective &S);
3927 /// Emit device code for the target teams distribute directive.
3929 CodeGenModule &CGM, StringRef ParentName,
3931 /// Emit device code for the target teams distribute simd directive.
3933 CodeGenModule &CGM, StringRef ParentName,
3935 /// Emit device code for the target simd directive.
3937 StringRef ParentName,
3938 const OMPTargetSimdDirective &S);
3939 /// Emit device code for the target teams distribute parallel for simd
3940 /// directive.
3942 CodeGenModule &CGM, StringRef ParentName,
3944
3945 /// Emit device code for the target teams loop directive.
3947 CodeGenModule &CGM, StringRef ParentName,
3949
3950 /// Emit device code for the target parallel loop directive.
3952 CodeGenModule &CGM, StringRef ParentName,
3954
3956 CodeGenModule &CGM, StringRef ParentName,
3958
3959 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
3960 /// TODO: The \p Depth paramter is not yet implemented and must be 1. In the
3961 /// future it is meant to be the number of loops expected in the loop nests
3962 /// (usually specified by the "collapse" clause) that are collapsed to a
3963 /// single loop by this function.
3964 llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
3965 int Depth);
3966
3967 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
3969
3970 /// Emit inner loop of the worksharing/simd construct.
3971 ///
3972 /// \param S Directive, for which the inner loop must be emitted.
3973 /// \param RequiresCleanup true, if directive has some associated private
3974 /// variables.
3975 /// \param LoopCond Bollean condition for loop continuation.
3976 /// \param IncExpr Increment expression for loop control variable.
3977 /// \param BodyGen Generator for the inner body of the inner loop.
3978 /// \param PostIncGen Genrator for post-increment code (required for ordered
3979 /// loop directvies).
3981 const OMPExecutableDirective &S, bool RequiresCleanup,
3982 const Expr *LoopCond, const Expr *IncExpr,
3983 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
3984 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
3985
3987 /// Emit initial code for loop counters of loop-based directives.
3989 OMPPrivateScope &LoopScope);
3990
3991 /// Helper for the OpenMP loop directives.
3993
3994 /// Emit code for the worksharing loop-based directive.
3995 /// \return true, if this construct has any lastprivate clause, false -
3996 /// otherwise.
3998 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3999 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4000
4001 /// Emit code for the distribute loop-based directive.
4003 const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
4004
4005 /// Helpers for the OpenMP loop directives.
4008 const OMPLoopDirective &D,
4009 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
4010
4011 /// Emits the lvalue for the expression with possibly captured variable.
4013
4014private:
4015 /// Helpers for blocks.
4016 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
4017
4018 /// struct with the values to be passed to the OpenMP loop-related functions
4019 struct OMPLoopArguments {
4020 /// loop lower bound
4022 /// loop upper bound
4024 /// loop stride
4026 /// isLastIteration argument for runtime functions
4028 /// Chunk value generated by sema
4029 llvm::Value *Chunk = nullptr;
4030 /// EnsureUpperBound
4031 Expr *EUB = nullptr;
4032 /// IncrementExpression
4033 Expr *IncExpr = nullptr;
4034 /// Loop initialization
4035 Expr *Init = nullptr;
4036 /// Loop exit condition
4037 Expr *Cond = nullptr;
4038 /// Update of LB after a whole chunk has been executed
4039 Expr *NextLB = nullptr;
4040 /// Update of UB after a whole chunk has been executed
4041 Expr *NextUB = nullptr;
4042 /// Distinguish between the for distribute and sections
4043 OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
4044 OMPLoopArguments() = default;
4045 OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
4046 llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
4047 Expr *IncExpr = nullptr, Expr *Init = nullptr,
4048 Expr *Cond = nullptr, Expr *NextLB = nullptr,
4049 Expr *NextUB = nullptr)
4050 : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
4051 IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
4052 NextUB(NextUB) {}
4053 };
4054 void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
4055 const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
4056 const OMPLoopArguments &LoopArgs,
4057 const CodeGenLoopTy &CodeGenLoop,
4058 const CodeGenOrderedTy &CodeGenOrdered);
4059 void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
4060 bool IsMonotonic, const OMPLoopDirective &S,
4061 OMPPrivateScope &LoopScope, bool Ordered,
4062 const OMPLoopArguments &LoopArgs,
4063 const CodeGenDispatchBoundsTy &CGDispatchBounds);
4064 void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
4065 const OMPLoopDirective &S,
4066 OMPPrivateScope &LoopScope,
4067 const OMPLoopArguments &LoopArgs,
4068 const CodeGenLoopTy &CodeGenLoopContent);
4069 /// Emit code for sections directive.
4070 void EmitSections(const OMPExecutableDirective &S);
4071
4072public:
4073 //===--------------------------------------------------------------------===//
4074 // OpenACC Emission
4075 //===--------------------------------------------------------------------===//
4077 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4078 // simply emitting its structured block, but in the future we will implement
4079 // some sort of IR.
4080 EmitStmt(S.getStructuredBlock());
4081 }
4082
4084 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4085 // simply emitting its loop, but in the future we will implement
4086 // some sort of IR.
4087 EmitStmt(S.getLoop());
4088 }
4089
4091 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4092 // simply emitting its loop, but in the future we will implement
4093 // some sort of IR.
4094 EmitStmt(S.getLoop());
4095 }
4096
4098 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4099 // simply emitting its structured block, but in the future we will implement
4100 // some sort of IR.
4101 EmitStmt(S.getStructuredBlock());
4102 }
4103
4105 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4106 // but in the future we will implement some sort of IR.
4107 }
4108
4110 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4111 // but in the future we will implement some sort of IR.
4112 }
4113
4115 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4116 // simply emitting its structured block, but in the future we will implement
4117 // some sort of IR.
4118 EmitStmt(S.getStructuredBlock());
4119 }
4120
4122 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4123 // but in the future we will implement some sort of IR.
4124 }
4125
4127 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4128 // but in the future we will implement some sort of IR.
4129 }
4130
4132 // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
4133 // but in the future we will implement some sort of IR.
4134 }
4135
4136 //===--------------------------------------------------------------------===//
4137 // LValue Expression Emission
4138 //===--------------------------------------------------------------------===//
4139
4140 /// Create a check that a scalar RValue is non-null.
4142
4143 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
4145
4146 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
4147 /// and issue an ErrorUnsupported style diagnostic (using the
4148 /// provided Name).
4150 const char *Name);
4151
4152 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
4153 /// an ErrorUnsupported style diagnostic (using the provided Name).
4155 const char *Name);
4156
4157 /// EmitLValue - Emit code to compute a designator that specifies the location
4158 /// of the expression.
4159 ///
4160 /// This can return one of two things: a simple address or a bitfield
4161 /// reference. In either case, the LLVM Value* in the LValue structure is
4162 /// guaranteed to be an LLVM pointer type.
4163 ///
4164 /// If this returns a bitfield reference, nothing about the pointee type of
4165 /// the LLVM value is known: For example, it may not be a pointer to an
4166 /// integer.
4167 ///
4168 /// If this returns a normal address, and if the lvalue's C type is fixed
4169 /// size, this method guarantees that the returned pointer type will point to
4170 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
4171 /// variable length type, this is not possible.
4172 ///
4174 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
4175
4176private:
4177 LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
4178
4179public:
4180 /// Same as EmitLValue but additionally we generate checking code to
4181 /// guard against undefined behavior. This is only suitable when we know
4182 /// that the address will be used to access the object.
4184
4187
4188 void EmitAtomicInit(Expr *E, LValue lvalue);
4189
4191
4194
4196 llvm::AtomicOrdering AO, bool IsVolatile = false,
4198
4199 void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
4200
4201 void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
4202 bool IsVolatile, bool isInit);
4203
4204 std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
4206 llvm::AtomicOrdering Success =
4207 llvm::AtomicOrdering::SequentiallyConsistent,
4208 llvm::AtomicOrdering Failure =
4209 llvm::AtomicOrdering::SequentiallyConsistent,
4210 bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
4211
4212 /// Emit an atomicrmw instruction, and applying relevant metadata when
4213 /// applicable.
4214 llvm::AtomicRMWInst *emitAtomicRMWInst(
4215 llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
4216 llvm::AtomicOrdering Order = llvm::AtomicOrdering::SequentiallyConsistent,
4217 llvm::SyncScope::ID SSID = llvm::SyncScope::System,
4218 const AtomicExpr *AE = nullptr);
4219
4220 void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
4221 const llvm::function_ref<RValue(RValue)> &UpdateOp,
4222 bool IsVolatile);
4223
4224 /// EmitToMemory - Change a scalar value from its value
4225 /// representation to its in-memory representation.
4226 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
4227
4228 /// EmitFromMemory - Change a scalar value from its memory
4229 /// representation to its value representation.
4230 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
4231
4232 /// Check if the scalar \p Value is within the valid range for the given
4233 /// type \p Ty.
4234 ///
4235 /// Returns true if a check is needed (even if the range is unknown).
4236 bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
4238
4239 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4240 /// care to appropriately convert from the memory representation to
4241 /// the LLVM value representation.
4242 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4245 bool isNontemporal = false) {
4246 return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
4247 CGM.getTBAAAccessInfo(Ty), isNontemporal);
4248 }
4249
4250 llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
4252 TBAAAccessInfo TBAAInfo,
4253 bool isNontemporal = false);
4254
4255 /// EmitLoadOfScalar - Load a scalar value from an address, taking
4256 /// care to appropriately convert from the memory representation to
4257 /// the LLVM value representation. The l-value must be a simple
4258 /// l-value.
4260
4261 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4262 /// care to appropriately convert from the memory representation to
4263 /// the LLVM value representation.
4264 void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
4265 bool Volatile, QualType Ty,
4267 bool isInit = false, bool isNontemporal = false) {
4268 EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
4269 CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
4270 }
4271
4272 void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
4273 bool Volatile, QualType Ty,
4274 LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
4275 bool isInit = false, bool isNontemporal = false);
4276
4277 /// EmitStoreOfScalar - Store a scalar value to an address, taking
4278 /// care to appropriately convert from the memory representation to
4279 /// the LLVM value representation. The l-value must be a simple
4280 /// l-value. The isInit flag indicates whether this is an initialization.
4281 /// If so, atomic qualifiers are ignored and the store is always non-atomic.
4282 void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
4283
4284 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
4285 /// this method emits the address of the lvalue, then loads the result as an
4286 /// rvalue, returning the rvalue.
4291
4292 /// Like EmitLoadOfLValue but also handles complex and aggregate types.
4295 SourceLocation Loc = {});
4296
4297 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
4298 /// lvalue, where both are guaranteed to the have the same type, and that type
4299 /// is 'Ty'.
4300 void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
4303
4304 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
4305 /// as EmitStoreThroughLValue.
4306 ///
4307 /// \param Result [out] - If non-null, this will be set to a Value* for the
4308 /// bit-field contents after the store, appropriate for use as the result of
4309 /// an assignment to the bit-field.
4311 llvm::Value **Result=nullptr);
4312
4313 /// Emit an l-value for an assignment (simple or compound) of complex type.
4317 llvm::Value *&Result);
4318
4319 // Note: only available for agg return types
4322 // Note: only available for agg return types
4324 llvm::CallBase **CallOrInvoke = nullptr);
4325 // Note: only available for agg return types
4333 bool Accessed = false);
4334 llvm::Value *EmitMatrixIndexExpr(const Expr *E);
4337 bool IsLowerBound = true);
4349
4350 std::pair<LValue, LValue> EmitHLSLOutArgLValues(const HLSLOutArgExpr *E,
4351 QualType Ty);
4353 QualType Ty);
4354
4356
4358
4360 LValueBaseInfo *BaseInfo = nullptr,
4361 TBAAAccessInfo *TBAAInfo = nullptr);
4362
4364 llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
4365 ConstantEmission(llvm::Constant *C, bool isReference)
4366 : ValueAndIsReference(C, isReference) {}
4367 public:
4369 static ConstantEmission forReference(llvm::Constant *C) {
4370 return ConstantEmission(C, true);
4371 }
4372 static ConstantEmission forValue(llvm::Constant *C) {
4373 return ConstantEmission(C, false);
4374 }
4375
4376 explicit operator bool() const {
4377 return ValueAndIsReference.getOpaqueValue() != nullptr;
4378 }
4379
4380 bool isReference() const { return ValueAndIsReference.getInt(); }
4382 assert(isReference());
4383 return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
4384 refExpr->getType());
4385 }
4386
4387 llvm::Constant *getValue() const {
4388 assert(!isReference());
4389 return ValueAndIsReference.getPointer();
4390 }
4391 };
4392
4395 llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
4396
4400
4402 const ObjCIvarDecl *Ivar);
4404 const ObjCIvarDecl *Ivar);
4408 llvm::Value *ThisValue);
4409
4410 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
4411 /// if the Field is a reference, this will return the address of the reference
4412 /// and not the address of the value stored in the reference.
4414 const FieldDecl* Field);
4415
4417 llvm::Value* Base, const ObjCIvarDecl *Ivar,
4418 unsigned CVRQualifiers);
4419
4424
4431
4432 //===--------------------------------------------------------------------===//
4433 // Scalar Expression Emission
4434 //===--------------------------------------------------------------------===//
4435
4436 /// EmitCall - Generate a call of the given function, expecting the given
4437 /// result type, and using the given argument list which specifies both the
4438 /// LLVM arguments and the types they were derived from.
4439 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4441 llvm::CallBase **CallOrInvoke, bool IsMustTail,
4443 bool IsVirtualFunctionPointerThunk = false);
4444 RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
4446 llvm::CallBase **CallOrInvoke = nullptr,
4447 bool IsMustTail = false) {
4448 return EmitCall(CallInfo, Callee, ReturnValue, Args, CallOrInvoke,
4449 IsMustTail, SourceLocation());
4450 }
4451 RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
4452 ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr,
4453 llvm::CallBase **CallOrInvoke = nullptr,
4454 CGFunctionInfo const **ResolvedFnInfo = nullptr);
4455
4456 // If a Call or Invoke instruction was emitted for this CallExpr, this method
4457 // writes the pointer to `CallOrInvoke` if it's not null.
4460 llvm::CallBase **CallOrInvoke = nullptr);
4462 llvm::CallBase **CallOrInvoke = nullptr);
4464
4465 void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
4467
4468 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4469 const Twine &name = "");
4470 llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
4472 const Twine &name = "");
4473 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4474 const Twine &name = "");
4475 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4476 ArrayRef<Address> args,
4477 const Twine &name = "");
4478 llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4480 const Twine &name = "");
4481
4483 getBundlesForFunclet(llvm::Value *Callee);
4484
4485 llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
4487 const Twine &Name = "");
4488 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4490 const Twine &name = "");
4491 llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4492 const Twine &name = "");
4493 void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4495
4497 NestedNameSpecifier *Qual,
4498 llvm::Type *Ty);
4499
4502 const CXXRecordDecl *RD);
4503
4505
4506 /// Create the discriminator from the storage address and the entity hash.
4507 llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
4508 llvm::Value *Discriminator);
4510 llvm::Value *StorageAddress,
4511 GlobalDecl SchemaDecl,
4512 QualType SchemaType);
4513
4514 llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
4515 llvm::Value *Pointer);
4516
4517 llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
4518 llvm::Value *Pointer);
4519
4521 const CGPointerAuthInfo &CurAuthInfo,
4522 const CGPointerAuthInfo &NewAuthInfo,
4523 bool IsKnownNonNull);
4524 llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
4525 const CGPointerAuthInfo &CurInfo,
4526 const CGPointerAuthInfo &NewInfo);
4527
4529 const CGPointerAuthInfo &Info,
4531
4532 llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
4533 QualType SourceType, QualType DestType);
4535 QualType DestType);
4536
4538
4539 llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
4540 return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
4541 }
4542
4543 // Return the copy constructor name with the prefix "__copy_constructor_"
4544 // removed.
4546 CharUnits Alignment,
4547 bool IsVolatile,
4548 ASTContext &Ctx);
4549
4550 // Return the destructor name with the prefix "__destructor_" removed.
4552 CharUnits Alignment,
4553 bool IsVolatile,
4554 ASTContext &Ctx);
4555
4556 // These functions emit calls to the special functions of non-trivial C
4557 // structs.
4565
4567 const CXXMethodDecl *Method, const CGCallee &Callee,
4568 ReturnValueSlot ReturnValue, llvm::Value *This,
4569 llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E,
4570 CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke);
4572 llvm::Value *This, QualType ThisTy,
4573 llvm::Value *ImplicitParam,
4574 QualType ImplicitParamTy, const CallExpr *E,
4575 llvm::CallBase **CallOrInvoke = nullptr);
4578 llvm::CallBase **CallOrInvoke = nullptr);
4580 const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
4581 bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
4582 const Expr *Base, llvm::CallBase **CallOrInvoke);
4583 // Compute the object pointer.
4585 llvm::Value *memberPtr,
4586 const MemberPointerType *memberPtrType,
4587 LValueBaseInfo *BaseInfo = nullptr,
4588 TBAAAccessInfo *TBAAInfo = nullptr);
4591 llvm::CallBase **CallOrInvoke);
4592
4594 const CXXMethodDecl *MD,
4596 llvm::CallBase **CallOrInvoke);
4598
4601 llvm::CallBase **CallOrInvoke);
4602
4605
4606 RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
4608
4609 RValue emitRotate(const CallExpr *E, bool IsRotateRight);
4610
4611 /// Emit IR for __builtin_os_log_format.
4613
4614 /// Emit IR for __builtin_is_aligned.
4616 /// Emit IR for __builtin_align_up/__builtin_align_down.
4617 RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);
4618
4621 CharUnits BufferAlignment);
4622
4624 llvm::CallBase **CallOrInvoke);
4625
4626 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
4627 /// is unhandled by the current target.
4628 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4630
4631 llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
4632 const llvm::CmpInst::Predicate Fp,
4633 const llvm::CmpInst::Predicate Ip,
4634 const llvm::Twine &Name = "");
4635 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4637 llvm::Triple::ArchType Arch);
4638 llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4640 llvm::Triple::ArchType Arch);
4641 llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4643 llvm::Triple::ArchType Arch);
4644 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
4645 QualType RTy);
4646 llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
4647 QualType RTy);
4648
4649 llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
4650 unsigned LLVMIntrinsic,
4651 unsigned AltLLVMIntrinsic,
4652 const char *NameHint,
4653 unsigned Modifier,
4654 const CallExpr *E,
4656 Address PtrOp0, Address PtrOp1,
4657 llvm::Triple::ArchType Arch);
4658
4659 llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
4660 unsigned Modifier, llvm::Type *ArgTy,
4661 const CallExpr *E);
4662 llvm::Value *EmitNeonCall(llvm::Function *F,
4664 const char *name,
4665 unsigned shift = 0, bool rightshift = false);
4666 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
4667 const llvm::ElementCount &Count);
4668 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
4669 llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
4670 bool negateForRightShift);
4671 llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
4672 llvm::Type *Ty, bool usgn, const char *name);
4673 llvm::Value *vectorWrapScalar16(llvm::Value *Op);
4674 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
4675 /// access builtin. Only required if it can't be inferred from the base
4676 /// pointer operand.
4677 llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
4678
4680 getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
4682 llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
4683 llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
4684 llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
4685 llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
4687 llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
4688 llvm::Type *ReturnType,
4690 llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
4691 llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
4692 llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
4693 llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
4694 llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
4696 unsigned BuiltinID);
4697 llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
4699 unsigned BuiltinID);
4700 llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
4701 llvm::ScalableVectorType *VTy);
4702 llvm::Value *EmitSVEPredicateTupleCast(llvm::Value *PredTuple,
4703 llvm::StructType *Ty);
4704 llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
4706 unsigned IntID);
4707 llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
4709 unsigned IntID);
4710 llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
4712 unsigned BuiltinID, bool IsZExtReturn);
4713 llvm::Value *EmitSVEMaskedStore(const CallExpr *,
4715 unsigned BuiltinID);
4716 llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
4718 unsigned BuiltinID);
4719 llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
4721 unsigned IntID);
4722 llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
4724 unsigned IntID);
4725 llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
4727 unsigned IntID);
4728 llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4729
4730 llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
4732 unsigned IntID);
4733 llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
4735 unsigned IntID);
4736 llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
4738 unsigned IntID);
4739 llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
4741 unsigned IntID);
4742
4743 void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
4745 SVETypeFlags TypeFlags);
4746
4747 llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4748
4749 llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4750 llvm::Triple::ArchType Arch);
4751 llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4752
4754 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4755 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4756 llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4757 llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4759 llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
4760 const CallExpr *E);
4761 llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4762 llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4763 llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
4764 const CallExpr *E);
4765 llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
4766 llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
4768
4769 llvm::Value *EmitRISCVCpuSupports(const CallExpr *E);
4770 llvm::Value *EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs);
4771 llvm::Value *EmitRISCVCpuInit();
4772 llvm::Value *EmitRISCVCpuIs(const CallExpr *E);
4773 llvm::Value *EmitRISCVCpuIs(StringRef CPUStr);
4774
4775 void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
4776 const CallExpr *E);
4777 void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
4778 llvm::AtomicOrdering &AO,
4779 llvm::SyncScope::ID &SSID);
4780
4781 enum class MSVCIntrin;
4782 llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
4783
4784 llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);
4785
4788 llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
4791 llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
4792 const ObjCMethodDecl *MethodWithObjects);
4795 ReturnValueSlot Return = ReturnValueSlot());
4796
4797 /// Retrieves the default cleanup kind for an ARC cleanup.
4798 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
4800 return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
4802 }
4803
4804 // ARC primitives.
4805 void EmitARCInitWeak(Address addr, llvm::Value *value);
4807 llvm::Value *EmitARCLoadWeak(Address addr);
4809 llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
4810 void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4811 void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
4814 llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
4815 llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
4816 llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
4817 bool resultIgnored);
4818 llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
4819 bool resultIgnored);
4820 llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
4821 llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
4822 llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
4824 void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4825 llvm::Value *EmitARCAutorelease(llvm::Value *value);
4826 llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
4827 llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
4828 llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
4829 llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
4830
4831 llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
4832 llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
4833 llvm::Type *returnType);
4834 void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
4835
4836 std::pair<LValue,llvm::Value*>
4838 std::pair<LValue,llvm::Value*>
4839 EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
4840 std::pair<LValue,llvm::Value*>
4842
4843 llvm::Value *EmitObjCAlloc(llvm::Value *value,
4844 llvm::Type *returnType);
4845 llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
4846 llvm::Type *returnType);
4847 llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);
4848
4849 llvm::Value *EmitObjCThrowOperand(const Expr *expr);
4850 llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
4851 llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
4852
4853 llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
4854 llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
4855 bool allowUnsafeClaim);
4856 llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
4859
4861
4863
4869
4870 void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
4873 void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
4874 void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
4875
4876 /// Emits a reference binding to the passed in expression.
4878
4879 //===--------------------------------------------------------------------===//
4880 // Expression Emission
4881 //===--------------------------------------------------------------------===//
4882
4883 // Expressions are broken into three classes: scalar, complex, aggregate.
4884
4885 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
4886 /// scalar type, returning the result.
4887 llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
4888
4889 /// Emit a conversion from the specified type to the specified destination
4890 /// type, both of which are LLVM scalar types.
4891 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
4892 QualType DstTy, SourceLocation Loc);
4893
4894 /// Emit a conversion from the specified complex type to the specified
4895 /// destination type, where the destination type is an LLVM scalar type.
4897 QualType DstTy,
4899
4900 /// EmitAggExpr - Emit the computation of the specified expression
4901 /// of aggregate type. The result is computed into the given slot,
4902 /// which may be null to indicate that the value is not needed.
4903 void EmitAggExpr(const Expr *E, AggValueSlot AS);
4904
4905 /// EmitAggExprToLValue - Emit the computation of the specified expression of
4906 /// aggregate type into a temporary LValue.
4908
4910
4911 /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
4912 /// destination address.
4914 ExprValueKind SrcKind);
4915
4916 /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
4917 /// to at most \arg DstSize bytes.
4918 void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
4919 bool DstIsVolatile);
4920
4921 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
4922 /// make sure it survives garbage collection until this point.
4923 void EmitExtendGCLifetime(llvm::Value *object);
4924
4925 /// EmitComplexExpr - Emit the computation of the specified expression of
4926 /// complex type, returning the result.
4928 bool IgnoreReal = false,
4929 bool IgnoreImag = false);
4930
4931 /// EmitComplexExprIntoLValue - Emit the given expression of complex
4932 /// type and place its result into the specified l-value.
4933 void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
4934
4935 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
4936 void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
4937
4938 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
4940
4942 llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
4945
4948
4949 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
4950 /// global variable that has already been created for it. If the initializer
4951 /// has a different type than GV does, this may free GV and return a different
4952 /// one. Otherwise it just returns GV.
4953 llvm::GlobalVariable *
4955 llvm::GlobalVariable *GV);
4956
4957 // Emit an @llvm.invariant.start call for the given memory region.
4958 void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
4959
4960 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
4961 /// variable with global storage.
4962 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
4963 bool PerformInit);
4964
4965 llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
4966 llvm::Constant *Addr);
4967
4968 llvm::Function *createTLSAtExitStub(const VarDecl &VD,
4969 llvm::FunctionCallee Dtor,
4970 llvm::Constant *Addr,
4971 llvm::FunctionCallee &AtExit);
4972
4973 /// Call atexit() with a function that passes the given argument to
4974 /// the given function.
4975 void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
4976 llvm::Constant *addr);
4977
4978 /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
4979 /// support an 'atexit()' function.
4980 void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
4981 llvm::Constant *addr);
4982
4983 /// Call atexit() with function dtorStub.
4984 void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
4985
4986 /// Call unatexit() with function dtorStub.
4987 llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);
4988
4989 /// Emit code in this function to perform a guarded variable
4990 /// initialization. Guarded initializations are used when it's not
4991 /// possible to prove that an initialization will be done exactly
4992 /// once, e.g. with a static local variable or a static data member
4993 /// of a class template.
4994 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
4995 bool PerformInit);
4996
4998
4999 /// Emit a branch to select whether or not to perform guarded initialization.
5000 void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
5001 llvm::BasicBlock *InitBlock,
5002 llvm::BasicBlock *NoInitBlock,
5003 GuardKind Kind, const VarDecl *D);
5004
5005 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
5006 /// variables.
5007 void
5008 GenerateCXXGlobalInitFunc(llvm::Function *Fn,
5009 ArrayRef<llvm::Function *> CXXThreadLocals,
5011
5012 /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
5013 /// variables.
5015 llvm::Function *Fn,
5016 ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
5017 llvm::Constant *>>
5018 DtorsOrStermFinalizers);
5019
5020 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
5021 const VarDecl *D,
5022 llvm::GlobalVariable *Addr,
5023 bool PerformInit);
5024
5026
5027 void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
5028
5029 void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
5030
5032
5033 //===--------------------------------------------------------------------===//
5034 // Annotations Emission
5035 //===--------------------------------------------------------------------===//
5036
5037 /// Emit an annotation call (intrinsic).
5038 llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
5039 llvm::Value *AnnotatedVal,
5040 StringRef AnnotationStr,
5041 SourceLocation Location,
5042 const AnnotateAttr *Attr);
5043
5044 /// Emit local annotations for the local variable V, declared by D.
5045 void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
5046
5047 /// Emit field annotations for the given field & value. Returns the
5048 /// annotation result.
5050
5051 //===--------------------------------------------------------------------===//
5052 // Internal Helpers
5053 //===--------------------------------------------------------------------===//
5054
5055 /// ContainsLabel - Return true if the statement contains a label in it. If
5056 /// this statement is not executed normally, it not containing a label means
5057 /// that we can just remove the code.
5058 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
5059
5060 /// containsBreak - Return true if the statement contains a break out of it.
5061 /// If the statement (recursively) contains a switch or loop with a break
5062 /// inside of it, this is fine.
5063 static bool containsBreak(const Stmt *S);
5064
5065 /// Determine if the given statement might introduce a declaration into the
5066 /// current scope, by being a (possibly-labelled) DeclStmt.
5067 static bool mightAddDeclToScope(const Stmt *S);
5068
5069 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5070 /// to a constant, or if it does but contains a label, return false. If it
5071 /// constant folds return true and set the boolean result in Result.
5073 bool AllowLabels = false);
5074
5075 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
5076 /// to a constant, or if it does but contains a label, return false. If it
5077 /// constant folds return true and set the folded value.
5078 bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
5079 bool AllowLabels = false);
5080
5081 /// Ignore parentheses and logical-NOT to track conditions consistently.
5082 static const Expr *stripCond(const Expr *C);
5083
5084 /// isInstrumentedCondition - Determine whether the given condition is an
5085 /// instrumentable condition (i.e. no "&&" or "||").
5086 static bool isInstrumentedCondition(const Expr *C);
5087
5088 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
5089 /// increments a profile counter based on the semantics of the given logical
5090 /// operator opcode. This is used to instrument branch condition coverage
5091 /// for logical operators.
5093 llvm::BasicBlock *TrueBlock,
5094 llvm::BasicBlock *FalseBlock,
5095 uint64_t TrueCount = 0,
5097 const Expr *CntrIdx = nullptr);
5098
5099 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
5100 /// if statement) to the specified blocks. Based on the condition, this might
5101 /// try to simplify the codegen of the conditional based on the branch.
5102 /// TrueCount should be the number of times we expect the condition to
5103 /// evaluate to true based on PGO data.
5104 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
5105 llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
5107 const Expr *ConditionalOp = nullptr);
5108
5109 /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
5110 /// nonnull, if \p LHS is marked _Nonnull.
5111 void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
5112
5113 /// An enumeration which makes it easier to specify whether or not an
5114 /// operation is a subtraction.
5115 enum { NotSubtraction = false, IsSubtraction = true };
5116
5117 /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
5118 /// detect undefined behavior when the pointer overflow sanitizer is enabled.
5119 /// \p SignedIndices indicates whether any of the GEP indices are signed.
5120 /// \p IsSubtraction indicates whether the expression used to form the GEP
5121 /// is a subtraction.
5122 llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
5124 bool SignedIndices,
5125 bool IsSubtraction,
5127 const Twine &Name = "");
5128
5130 llvm::Type *elementType, bool SignedIndices,
5132 CharUnits Align, const Twine &Name = "");
5133
5134 /// Specifies which type of sanitizer check to apply when handling a
5135 /// particular builtin.
5140 };
5141
5142 /// Emits an argument for a call to a builtin. If the builtin sanitizer is
5143 /// enabled, a runtime check specified by \p Kind is also emitted.
5145
5146 /// Emits an argument for a call to a `__builtin_assume`. If the builtin
5147 /// sanitizer is enabled, a runtime check is also emitted.
5148 llvm::Value *EmitCheckedArgForAssume(const Expr *E);
5149
5150 /// Emit a description of a type in a format suitable for passing to
5151 /// a runtime sanitizer handler.
5153
5154 /// Convert a value into a format suitable for passing to a runtime
5155 /// sanitizer handler.
5156 llvm::Value *EmitCheckValue(llvm::Value *V);
5157
5158 /// Emit a description of a source location in a format suitable for
5159 /// passing to a runtime sanitizer handler.
5161
5164
5165 /// Create a basic block that will either trap or call a handler function in
5166 /// the UBSan runtime with the provided arguments, and create a conditional
5167 /// branch to it.
5168 void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
5170 ArrayRef<llvm::Value *> DynamicArgs);
5171
5172 /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
5173 /// if Cond if false.
5174 void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
5175 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
5176 ArrayRef<llvm::Constant *> StaticArgs);
5177
5178 /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
5179 /// checking is enabled. Otherwise, just emit an unreachable instruction.
5181
5182 /// Create a basic block that will call the trap intrinsic, and emit a
5183 /// conditional branch to it, for the -ftrapv checks.
5184 void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID,
5185 bool NoMerge = false);
5186
5187 /// Emit a call to trap or debugtrap and attach function attribute
5188 /// "trap-func-name" if specified.
5189 llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
5190
5191 /// Emit a stub for the cross-DSO CFI check function.
5193
5194 /// Emit a cross-DSO CFI failure handling function.
5196
5197 /// Create a check for a function parameter that may potentially be
5198 /// declared as non-null.
5200 AbstractCallee AC, unsigned ParmNum);
5201
5203 SourceLocation ArgLoc, AbstractCallee AC,
5204 unsigned ParmNum);
5205
5206 /// EmitWriteback - Emit callbacks for function.
5207 void EmitWritebacks(const CallArgList &Args);
5208
5209 /// EmitCallArg - Emit a single call argument.
5210 void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
5211
5212 /// EmitDelegateCallArg - We are performing a delegate call; that
5213 /// is, the current function is delegating to another one. Produce
5214 /// a r-value suitable for passing the given parameter.
5215 void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
5216 SourceLocation loc);
5217
5218 /// SetFPAccuracy - Set the minimum required accuracy of the given floating
5219 /// point operation, expressed as the maximum relative error in ulp.
5220 void SetFPAccuracy(llvm::Value *Val, float Accuracy);
5221
5222 /// Set the minimum required accuracy of the given sqrt operation
5223 /// based on CodeGenOpts.
5224 void SetSqrtFPAccuracy(llvm::Value *Val);
5225
5226 /// Set the minimum required accuracy of the given sqrt operation based on
5227 /// CodeGenOpts.
5228 void SetDivFPAccuracy(llvm::Value *Val);
5229
5230 /// Set the codegen fast-math flags.
5231 void SetFastMathFlags(FPOptions FPFeatures);
5232
5233 // Truncate or extend a boolean vector to the requested number of elements.
5234 llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
5235 unsigned NumElementsDst,
5236 const llvm::Twine &Name = "");
5237 // Adds a convergence_ctrl token to |Input| and emits the required parent
5238 // convergence instructions.
5239 template <typename CallType>
5240 CallType *addControlledConvergenceToken(CallType *Input) {
5241 return cast<CallType>(
5242 addConvergenceControlToken(Input, ConvergenceTokenStack.back()));
5243 }
5244
5245private:
5246 // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
5247 // as it's parent convergence instr.
5248 llvm::IntrinsicInst *emitConvergenceLoopToken(llvm::BasicBlock *BB,
5249 llvm::Value *ParentToken);
5250 // Adds a convergence_ctrl token with |ParentToken| as parent convergence
5251 // instr to the call |Input|.
5252 llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input,
5253 llvm::Value *ParentToken);
5254 // Find the convergence_entry instruction |F|, or emits ones if none exists.
5255 // Returns the convergence instruction.
5256 llvm::IntrinsicInst *getOrEmitConvergenceEntryToken(llvm::Function *F);
5257 // Find the convergence_loop instruction for the loop defined by |LI|, or
5258 // emits one if none exists. Returns the convergence instruction.
5259 llvm::IntrinsicInst *getOrEmitConvergenceLoopToken(const LoopInfo *LI);
5260
5261private:
5262 llvm::MDNode *getRangeForLoadFromType(QualType Ty);
5263 void EmitReturnOfRValue(RValue RV, QualType Ty);
5264
5265 void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
5266
5268 DeferredReplacements;
5269
5270 /// Set the address of a local variable.
5271 void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
5272 assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
5273 LocalDeclMap.insert({VD, Addr});
5274 }
5275
5276 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
5277 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
5278 ///
5279 /// \param AI - The first function argument of the expansion.
5280 void ExpandTypeFromArgs(QualType Ty, LValue Dst,
5281 llvm::Function::arg_iterator &AI);
5282
5283 /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
5284 /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
5285 /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
5286 void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
5287 SmallVectorImpl<llvm::Value *> &IRCallArgs,
5288 unsigned &IRCallArgPos);
5289
5290 std::pair<llvm::Value *, llvm::Type *>
5291 EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
5292 std::string &ConstraintStr);
5293
5294 std::pair<llvm::Value *, llvm::Type *>
5295 EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
5296 QualType InputType, std::string &ConstraintStr,
5297 SourceLocation Loc);
5298
5299 /// Attempts to statically evaluate the object size of E. If that
5300 /// fails, emits code to figure the size of E out for us. This is
5301 /// pass_object_size aware.
5302 ///
5303 /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
5304 llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
5305 llvm::IntegerType *ResType,
5306 llvm::Value *EmittedE,
5307 bool IsDynamic);
5308
5309 /// Emits the size of E, as required by __builtin_object_size. This
5310 /// function is aware of pass_object_size parameters, and will act accordingly
5311 /// if E is a parameter with the pass_object_size attribute.
5312 llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
5313 llvm::IntegerType *ResType,
5314 llvm::Value *EmittedE,
5315 bool IsDynamic);
5316
5317 llvm::Value *emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
5318 llvm::IntegerType *ResType);
5319
5320 void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
5321 Address Loc);
5322
5323public:
5324 enum class EvaluationOrder {
5325 ///! No language constraints on evaluation order.
5326 Default,
5327 ///! Language semantics require left-to-right evaluation.
5329 ///! Language semantics require right-to-left evaluation.
5331 };
5332
5333 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
5334 // an ObjCMethodDecl.
5336 llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;
5337
5340 };
5341
5343 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
5345 unsigned ParamsToSkip = 0,
5347
5348 /// EmitPointerWithAlignment - Given an expression with a pointer type,
5349 /// emit the value and compute our best estimate of the alignment of the
5350 /// pointee.
5351 ///
5352 /// \param BaseInfo - If non-null, this will be initialized with
5353 /// information about the source of the alignment and the may-alias
5354 /// attribute. Note that this function will conservatively fall back on
5355 /// the type when it doesn't recognize the expression and may-alias will
5356 /// be set to false.
5357 ///
5358 /// One reasonable way to use this information is when there's a language
5359 /// guarantee that the pointer must be aligned to some stricter value, and
5360 /// we're simply trying to ensure that sufficiently obvious uses of under-
5361 /// aligned objects don't get miscompiled; for example, a placement new
5362 /// into the address of a local variable. In such a case, it's quite
5363 /// reasonable to just ignore the returned alignment when it isn't from an
5364 /// explicit source.
5365 Address
5366 EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
5367 TBAAAccessInfo *TBAAInfo = nullptr,
5368 KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
5369
5370 /// If \p E references a parameter with pass_object_size info or a constant
5371 /// array size modifier, emit the object size divided by the size of \p EltTy.
5372 /// Otherwise return null.
5373 llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);
5374
5375 void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
5376
5378 llvm::Function *Function;
5380 std::optional<StringRef> Architecture;
5381
5382 FMVResolverOption(llvm::Function *F, ArrayRef<StringRef> Feats,
5383 std::optional<StringRef> Arch = std::nullopt)
5384 : Function(F), Features(Feats), Architecture(Arch) {}
5385 };
5386
5387 // Emits the body of a multiversion function's resolver. Assumes that the
5388 // options are already sorted in the proper order, with the 'default' option
5389 // last (if it exists).
5390 void EmitMultiVersionResolver(llvm::Function *Resolver,
5392 void EmitX86MultiVersionResolver(llvm::Function *Resolver,
5394 void EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
5396 void EmitRISCVMultiVersionResolver(llvm::Function *Resolver,
5398
5399private:
5400 QualType getVarArgType(const Expr *Arg);
5401
5402 void EmitDeclMetadata();
5403
5404 BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
5405 const AutoVarEmission &emission);
5406
5407 void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
5408
5409 llvm::Value *GetValueForARMHint(unsigned BuiltinID);
5410 llvm::Value *EmitX86CpuIs(const CallExpr *E);
5411 llvm::Value *EmitX86CpuIs(StringRef CPUStr);
5412 llvm::Value *EmitX86CpuSupports(const CallExpr *E);
5413 llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
5414 llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
5415 llvm::Value *EmitX86CpuInit();
5416 llvm::Value *FormX86ResolverCondition(const FMVResolverOption &RO);
5417 llvm::Value *EmitAArch64CpuInit();
5418 llvm::Value *FormAArch64ResolverCondition(const FMVResolverOption &RO);
5419 llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
5420 llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
5421};
5422
5425 if (!needsSaving(value)) return saved_type(value, false);
5426
5427 // Otherwise, we need an alloca.
5428 auto align = CharUnits::fromQuantity(
5429 CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
5430 Address alloca =
5431 CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
5432 CGF.Builder.CreateStore(value, alloca);
5433
5434 return saved_type(alloca.emitRawPointer(CGF), true);
5435}
5436
5438 saved_type value) {
5439 // If the value says it wasn't saved, trust that it's still dominating.
5440 if (!value.getInt()) return value.getPointer();
5441
5442 // Otherwise, it should be an alloca instruction, as set up in save().
5443 auto alloca = cast<llvm::AllocaInst>(value.getPointer());
5444 return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
5445 alloca->getAlign());
5446}
5447
5448} // end namespace CodeGen
5449
5450// Map the LangOption for floating point exception behavior into
5451// the corresponding enum in the IR.
5452llvm::fp::ExceptionBehavior
5454} // end namespace clang
5455
5456#endif
Enums/classes describing ABI related information about constructors, destructors and thunks.
#define V(N, I)
Definition: ASTContext.h:3443
static bool CanThrow(Expr *E, ASTContext &Ctx)
Definition: CFG.cpp:2686
@ ForDeactivation
Definition: CGCleanup.cpp:1205
const Decl * D
Expr * E
enum clang::sema::@1718::IndirectLocalPathEntry::EntryKind Kind
unsigned OldSize
Defines the clang::Expr interface and subclasses for C++ expressions.
const CFGBlock * Block
Definition: HTMLLogger.cpp:152
#define X(type, name)
Definition: Value.h:144
llvm::MachO::Architecture Architecture
Definition: MachO.h:27
llvm::MachO::Target Target
Definition: MachO.h:51
Defines some OpenMP-specific enums and functions.
SourceRange Range
Definition: SemaObjC.cpp:758
VarDecl * Variable
Definition: SemaObjC.cpp:757
SourceLocation Loc
Definition: SemaObjC.cpp:759
const char * Data
This file defines OpenACC AST classes for statement-level contructs.
This file defines OpenMP AST classes for executable directives and clauses.
C Language Family Type Representation.
StateNode * Previous
#define bool
Definition: amdgpuintrin.h:20
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4224
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6986
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2718
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3127
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6678
Attr - This represents one attribute.
Definition: Attr.h:43
Represents an attribute applied to a statement.
Definition: Stmt.h:2107
BinaryConditionalOperator - The GNU extension to the conditional operator which allows the middle ope...
Definition: Expr.h:4324
OpaqueValueExpr * getOpaqueValue() const
getOpaqueValue - Return the opaque value placeholder.
Definition: Expr.h:4362
Expr * getCommon() const
getCommon - Return the common expression, written to the left of the condition.
Definition: Expr.h:4359
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3909
static bool isLogicalOp(Opcode Opc)
Definition: Expr.h:4042
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6414
BreakStmt - This represents a break.
Definition: Stmt.h:3007
Represents a call to a CUDA kernel function.
Definition: ExprCXX.h:231
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2553
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1268
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1375
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2498
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2817
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:478
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1737
Represents a call to a member function that may be written either with member call syntax (e....
Definition: ExprCXX.h:176
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2078
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2241
A call to an overloaded operator written using operator syntax.
Definition: ExprCXX.h:81
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2617
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
Represents a C++ temporary.
Definition: ExprCXX.h:1457
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1206
CXXTryStmt - A C++ try block, including all handlers.
Definition: StmtCXX.h:69
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
Describes the capture of either a variable, or 'this', or variable-length array type.
Definition: Stmt.h:3797
This captures a statement into a function.
Definition: Stmt.h:3784
CaseStmt - Represent a case statement.
Definition: Stmt.h:1828
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3547
const CXXBaseSpecifier *const * path_const_iterator
Definition: Expr.h:3614
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
Represents a 'co_await' expression.
Definition: ExprCXX.h:5191
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
bool hasOffset() const
Definition: Address.h:242
void setAlignment(CharUnits Value)
Definition: Address.h:191
llvm::Value * getOffset() const
Definition: Address.h:244
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:856
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:896
A pair of helper functions for a __block variable.
Information about the layout of a __block variable.
Definition: CGBlocks.h:136
CGBlockInfo - Information to generate a block literal.
Definition: CGBlocks.h:156
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
Definition: CGBuilder.h:158
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
All available information about a concrete callee.
Definition: CGCall.h:63
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
An abstract representation of regular/ObjC call/message targets.
const ParmVarDecl * getParamDecl(unsigned I) const
ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
Address getAllocatedAddress() const
Returns the raw, allocated address, which is not necessarily the address of the object itself.
RawAddress getOriginalAllocatedAddress() const
Returns the address for the original alloca instruction.
Address getObjectAddress(CodeGenFunction &CGF) const
Returns the address of the object within this declaration.
API for captured statement code generation.
static bool classof(const CGCapturedStmtInfo *)
llvm::SmallDenseMap< const VarDecl *, FieldDecl * > getCaptureFields()
Get the CaptureFields.
CGCapturedStmtInfo(CapturedRegionKind K=CR_Default)
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K=CR_Default)
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
RAII for correct setting/restoring of CapturedStmtInfo.
CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo)
CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
void Emit(CodeGenFunction &CGF, Flags flags) override
Emit the cleanup.
CallLifetimeEnd(RawAddress addr, llvm::Value *size)
An object to manage conditionally-evaluated expressions.
llvm::BasicBlock * getStartingBlock() const
Returns a block which will be executed prior to each evaluation of the conditional code.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const
A scope within which we are constructing the fields of an object which might use a CXXDefaultInitExpr...
FieldConstructionScope(CodeGenFunction &CGF, Address This)
A class controlling the emission of a finally block.
void enter(CodeGenFunction &CGF, const Stmt *Finally, llvm::FunctionCallee beginCatchFn, llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn)
Enters a finally block for an implementation using zero-cost exceptions.
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:717
LexicalScope(CodeGenFunction &CGF, SourceRange Range)
Enter a new cleanup scope.
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
~LexicalScope()
Exit this cleanup scope, emitting any accumulated cleanups.
RAII for preserving necessary info during inlined region body codegen.
InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &FiniBB)
void Emit(CodeGenFunction &CGF, Flags) override
Emit the cleanup.
RAII for preserving necessary info during Outlined region body codegen.
OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &RetBB)
Controls insertion of cancellation exit blocks in worksharing constructs.
OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel)
Save/restore original map of previously emitted local vars in case when we need to duplicate emission...
The class used to assign some variables some temporarily addresses.
bool apply(CodeGenFunction &CGF)
Applies new addresses to the list of the variables.
void restore(CodeGenFunction &CGF)
Restores original addresses of the variables.
bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD, Address TempAddr)
Sets the address of the variable LocalVD to be TempAddr in function CGF.
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
void restoreMap()
Restore all mapped variables w/o clean up.
bool Privatize()
Privatizes local variables previously registered as private.
bool isGlobalVarCaptured(const VarDecl *VD) const
Checks if the global variable is captured in current function.
OMPPrivateScope(CodeGenFunction &CGF)
Enter a new OpenMP private scope.
~OMPPrivateScope()
Exit scope - all the mapped variables are restored.
bool addPrivate(const VarDecl *LocalVD, Address Addr)
Registers LocalVD variable as a private with Addr as the address of the corresponding private variabl...
A non-RAII class containing all the information about a bound opaque value.
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
Build the opaque value mapping for an OpaqueValueExpr whose source expression is set to the expressio...
OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op)
Build the opaque value mapping for the given conditional operator if it's the GNU ?...
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue)
OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, LValue lvalue)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
ParentLoopDirectiveForScanRegion(CodeGenFunction &CGF, const OMPExecutableDirective &ParentLoopDirectiveForScan)
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
RunCleanupsScope(CodeGenFunction &CGF)
Enter a new cleanup scope.
~RunCleanupsScope()
Exit this cleanup scope, emitting any accumulated cleanups.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
An RAII object to record that we're evaluating a statement expression.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, ObjCMethodDecl *MD, bool ctor)
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, QualType::DestructionKind dtorKind)
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn, ArrayRef< llvm::Function * > CXXThreadLocals, ConstantAddress Guard=ConstantAddress::invalid())
GenerateCXXGlobalInitFunc - Generates code for initializing global variables.
llvm::Value * EmitPointerAuthAuth(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
void EmitGotoStmt(const GotoStmt &S)
void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This, Address Src, const CXXConstructExpr *E)
void EmitDestructorBody(FunctionArgList &Args)
void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, OMPTaskDataTy &Data)
void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD)
llvm::BasicBlock * getEHDispatchBlock(EHScopeStack::stable_iterator scope)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete, llvm::Value *CompletePtr, QualType ElementType)
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount=0, Stmt::Likelihood LH=Stmt::LH_None, const Expr *CntrIdx=nullptr)
EmitBranchToCounterBlock - Emit a conditional branch to a new block that increments a profile counter...
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void EmitARCDestroyWeak(Address addr)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags, bool LoadBlockVarAddr, bool CanThrow)
Enter a cleanup to destroy a __block variable.
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPAggregateAssign(Address DestAddr, Address SrcAddr, QualType OriginalType, const llvm::function_ref< void(Address, Address)> CopyGen)
Perform element by element copying of arrays with type OriginalType from SrcAddr to DestAddr using co...
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
void EmitAsanPrologueOrEpilogue(bool Prologue)
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EnterSEHTryStmt(const SEHTryStmt &S)
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl)
llvm::Value * EmitSVEPredicateCast(llvm::Value *Pred, llvm::ScalableVectorType *VTy)
Address getExceptionSlot()
Returns a pointer to the function's exception object and selector slot, which is assigned in every la...
RawAddress CreateMemTemp(QualType T, CharUnits Align, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
bool isBinaryLogicalOp(const Expr *E) const
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
void VolatilizeTryBlocks(llvm::BasicBlock *BB, llvm::SmallPtrSet< llvm::BasicBlock *, 10 > &V)
void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp, const CGFunctionInfo **ImplFnInfo, llvm::Function **ImplFn)
llvm::Function * GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF, const SEHFinallyStmt &Finally)
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
llvm::Function * GenerateSEHFilterFunction(CodeGenFunction &ParentCGF, const SEHExceptStmt &Except)
static Destroyer destroyNonTrivialCStruct
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee, const ThunkInfo *Thunk, bool IsUnprototyped)
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, const FunctionArgList &Args, SourceLocation Loc)
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
static bool cxxDestructorCanThrow(QualType T)
Check if T is a C++ class that has a destructor that can throw.
void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, const FunctionArgList &Args)
llvm::Function * GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk)
SanitizerSet SanOpts
Sanitizers enabled for this function.
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
llvm::BasicBlock * getInvokeDestImpl()
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr, const VarDecl *DestVD, const VarDecl *SrcVD, const Expr *Copy)
Emit proper copying of data from one variable to another.
void EmitIfStmt(const IfStmt &S)
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, CallArgList &CallArgs, const CGFunctionInfo *CallOpFnInfo=nullptr, llvm::Constant *CallOpFn=nullptr)
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T, Address Addr)
PushDestructorCleanup - Push a cleanup to call the complete-object variant of the given destructor on...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitARCMoveWeak(Address dst, Address src)
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
void EmitAArch64MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope, bool ForInscan=false)
Emit initial code for reduction variables.
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitVTableAssumptionLoad(const VPtr &vptr, Address This)
Emit assumption that vptr load == global vtable.
void unprotectFromPeepholes(PeepholeProtection protection)
Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter, const Stmt *OutlinedStmt)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
SmallVector< Address, 1 > SEHCodeSlotStack
A stack of exception code slots.
JumpDest getJumpDestInCurrentScope(StringRef Name=StringRef())
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, const ObjCMethodDecl *GetterMothodDecl, llvm::Constant *AtomicHelperFn)
void EmitAutoVarDecl(const VarDecl &D)
EmitAutoVarDecl - Emit an auto variable declaration.
void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E, SmallVectorImpl< llvm::Value * > &Ops, SVETypeFlags TypeFlags)
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
llvm::Constant * createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr)
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static void EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDirective &S)
Emit device code for the target teams directive.
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void callCStructDefaultConstructor(LValue Dst)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
static bool hasScalarEvaluationKind(QualType T)
llvm::Value * EmitObjCAutoreleasePoolPush()
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx)
void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc)
EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
llvm::Value * EmitARCRetainAutoreleaseNonBlock(llvm::Value *value)
void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr)
const BlockByrefInfo & getBlockByrefInfo(const VarDecl *var)
AwaitSuspendWrapperInfo CurAwaitSuspendWrapper
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
llvm::function_ref< std::pair< llvm::Value *, llvm::Value * >(CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T)
Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known to be unsigned.
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Address authPointerToPointerCast(Address Ptr, QualType SourceType, QualType DestType)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
std::pair< LValue, llvm::Value * > EmitARCStoreStrong(const BinaryOperator *e, bool ignored)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
RValue EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method, const CGCallee &Callee, ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E, CallArgList *RtlArgs, llvm::CallBase **CallOrInvoke)
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk, bool IsUnprototyped)
Generate a thunk for the given method.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * EmitARCRetainAutoreleasedReturnValue(llvm::Value *value)
void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
static void EmitOMPTargetTeamsDistributeDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeDirective &S)
Emit device code for the target teams distribute directive.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp)
static void EmitOMPTargetParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForSimdDirective &S)
Emit device code for the target parallel for simd directive.
llvm::Value * EmitObjCAllocWithZone(llvm::Value *value, llvm::Type *returnType)
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, llvm::Value *NumElements, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization=false)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::Value * EmitSVEGatherLoad(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, const ArrayType *ArrayTy, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization=false)
void popCatchScope()
popCatchScope - Pops the catch scope at the top of the EHScope stack, emitting any required code (oth...
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
llvm::Value * EmitRISCVCpuSupports(const CallExpr *E)
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
const OMPExecutableDirective * OMPParentLoopDirectiveForScan
Parent loop-based directive for scan directive.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitOMPTaskDirective(const OMPTaskDirective &S)
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitAnyExprToExn(const Expr *E, Address Addr)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, const Expr *Base, llvm::CallBase **CallOrInvoke)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
llvm::Value * EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void GenerateObjCMethod(const ObjCMethodDecl *OMD)
void EmitOMPUseDevicePtrClause(const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
llvm::Value * EmitARCAutorelease(llvm::Value *value)
llvm::Value * emitPointerAuthResignCall(llvm::Value *Pointer, const CGPointerAuthInfo &CurInfo, const CGPointerAuthInfo &NewInfo)
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
void EmitExtendGCLifetime(llvm::Value *object)
EmitExtendGCLifetime - Given a pointer to an Objective-C object, make sure it survives garbage collec...
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
void EmitOMPDistributeLoop(const OMPLoopDirective &S, const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr)
Emit code for the distribute loop-based directive.
void EmitARCNoopIntrinsicUse(ArrayRef< llvm::Value * > values)
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::Value * EmitSVEMaskedStore(const CallExpr *, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
llvm::Constant * GenerateObjCAtomicGetterCopyHelperFunction(const ObjCPropertyImplDecl *PID)
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S)
void callCStructMoveConstructor(LValue Dst, LValue Src)
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty)
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF, llvm::Value *ParentFP, llvm::Value *EntryEBP)
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
llvm::BasicBlock * getEHResumeBlock(bool isCleanup)
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetDirective &S)
Emit device code for the target directive.
void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr, QualType DeleteTy, llvm::Value *NumElements=nullptr, CharUnits CookieSize=CharUnits())
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void callCStructCopyConstructor(LValue Dst, LValue Src)
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitSEHExceptionInfo()
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
llvm::Value * EmitARCLoadWeakRetained(Address addr)
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S)
Emit device code for the target simd directive.
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope, llvm::AtomicOrdering &AO, llvm::SyncScope::ID &SSID)
void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S, OMPPrivateScope &LoopScope)
Emit initial code for loop counters of loop-based directives.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
LValue EmitInitListLValue(const InitListExpr *E)
llvm::Value * EmitARCRetainAutorelease(QualType type, llvm::Value *value)
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D, bool NoFinals, llvm::Value *IsLastIterCond=nullptr)
Emit final copying of lastprivate values to original variables at the end of the worksharing or simd ...
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::Function * generateAwaitSuspendWrapper(Twine const &CoroName, Twine const &SuspendPointName, CoroutineSuspendExpr const &S)
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
SmallVector< llvm::Value *, 8 > ObjCEHValueStack
ObjCEHValueStack - Stack of Objective-C exception values, used for rethrows.
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
void EmitFunctionBody(const Stmt *Body)
VlaSizePair getVLAElements1D(QualType vla)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
llvm::Value * EmitSVETupleCreate(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType, ArrayRef< llvm::Value * > Ops)
const CodeGen::CGBlockInfo * BlockInfo
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitAggregateCopyCtor(LValue Dest, LValue Src, AggValueSlot::Overlap_t MayOverlap)
llvm::Value * EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable, llvm::Type *VTableTy, uint64_t VTableByteOffset)
Emit a type checked load from the given vtable.
void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void EmitRISCVMultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
llvm::AllocaInst * EHSelectorSlot
The selector slot.
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, const Twine &name="")
void emitByrefStructureInit(const AutoVarEmission &emission)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
llvm::Value * EmitObjCRetainNonBlock(llvm::Value *value, llvm::Type *returnType)
llvm::Value * GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, bool Delegating)
GetVTTParameter - Return the VTT parameter that should be passed to a base constructor/destructor wit...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
llvm::Value * EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType)
Address EmitExtVectorElementLValue(LValue V)
void EmitOMPSimdFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_ConstructorCall
Checking the 'this' pointer for a constructor call.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
@ TCK_Upcast
Checking the operand of a cast to a base object.
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
llvm::Value * EmitSMELdrStr(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitContinueStmt(const ContinueStmt &S)
void setCurrentProfileCount(uint64_t Count)
Set the profiler's current count.
llvm::BasicBlock * getTerminateFunclet()
getTerminateLandingPad - Return a cleanup funclet that just calls terminate.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Value * EmitARCStoreStrongCall(Address addr, llvm::Value *value, bool resultIgnored)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isInit=false, bool isNontemporal=false)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Value * EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx, const llvm::ElementCount &Count)
VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass)
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
llvm::Type * ConvertTypeForMem(QualType T)
llvm::Function * createTLSAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr, llvm::FunctionCallee &AtExit)
Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *elementType, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align, const Twine &Name="")
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
llvm::Value * EmitARCUnsafeUnretainedScalarExpr(const Expr *expr)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitAutoVarInit(const AutoVarEmission &emission)
llvm::BasicBlock * getUnreachableBlock()
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy)
Emit an aggregate assignment.
void GenerateOpenMPCapturedVars(const CapturedStmt &S, SmallVectorImpl< llvm::Value * > &CapturedVars)
void EmitNonNullArgCheck(Address Addr, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
bool isPointerKnownNonNull(const Expr *E)
RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align, const Twine &Name="tmp")
llvm::Value * EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID, bool IsZExtReturn)
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
llvm::SmallVector< const JumpDest *, 2 > SEHTryEpilogueStack
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
DominatingValue< T >::saved_type saveValueInCond(T value)
const llvm::function_ref< void(CodeGenFunction &, llvm::Function *, const OMPTaskDataTy &)> TaskGenTy
static void EmitOMPTargetTeamsGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsGenericLoopDirective &S)
Emit device code for the target teams loop directive.
llvm::Value * ExceptionSlot
The exception slot.
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
llvm::Value * EmitARCRetainBlock(llvm::Value *value, bool mandatory)
QualType TypeOfSelfObject()
TypeOfSelfObject - Return type of object that this self represents.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
llvm::Value * EmitSVEDupX(llvm::Value *Scalar)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
void EmitAttributedStmt(const AttributedStmt &S)
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
llvm::BasicBlock * OMPBeforeScanBlock
void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn, llvm::Constant *addr)
Registers the dtor using 'llvm.global_dtors' for platforms that do not support an 'atexit()' function...
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
llvm::SmallPtrSet< const CXXRecordDecl *, 4 > VisitedVirtualBasesSetTy
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType, llvm::Type *ElementTy, Address NewPtr, llvm::Value *NumElements, llvm::Value *AllocSizeWithoutCookie)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen without...
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
llvm::Value * EmitPointerAuthSign(const CGPointerAuthInfo &Info, llvm::Value *Pointer)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A)
void EmitAtomicInit(Expr *E, LValue lvalue)
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
static std::string getNonTrivialDestructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx)
llvm::DenseMap< const Decl *, Address > DeclMapTy
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we're intending to store to the side, but which will prob...
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForDirective &S)
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy, SourceLocation Loc)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void EmitOMPInnerLoop(const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, const Expr *IncExpr, const llvm::function_ref< void(CodeGenFunction &)> BodyGen, const llvm::function_ref< void(CodeGenFunction &)> PostIncGen)
Emit inner loop of the worksharing/simd construct.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
llvm::Value * EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress, llvm::Value *Discriminator)
Create the discriminator from the storage address and the entity hash.
llvm::Constant * GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo)
llvm::Value * vectorWrapScalar16(llvm::Value *Op)
llvm::Function * LookupNeonLLVMIntrinsic(unsigned IntrinsicID, unsigned Modifier, llvm::Type *ArgTy, const CallExpr *E)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
void EmitLabelStmt(const LabelStmt &S)
void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
llvm::Value * EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
llvm::Value * EmitSEHExceptionCode()
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Value * EmitObjCCollectionLiteral(const Expr *E, const ObjCMethodDecl *MethodWithObjects)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void GenerateCXXGlobalCleanUpFunc(llvm::Function *Fn, ArrayRef< std::tuple< llvm::FunctionType *, llvm::WeakTrackingVH, llvm::Constant * > > DtorsOrStermFinalizers)
GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global variables.
void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)
Emit code in this function to perform a guarded variable initialization.
const Expr * RetExpr
If a return statement is being visited, this holds the return statment's result expression.
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV, bool PerformInit)
EmitCXXGlobalVarDeclInit - Create the initializer for a C++ variable with global storage.
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
llvm::Value * EmitObjCThrowOperand(const Expr *expr)
void EmitX86MultiVersionResolver(llvm::Function *Resolver, ArrayRef< FMVResolverOption > Options)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
llvm::Value * emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType, const CGPointerAuthInfo &CurAuthInfo, const CGPointerAuthInfo &NewAuthInfo, bool IsKnownNonNull)
void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc)
EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for RD using llvm....
void EmitOMPSingleDirective(const OMPSingleDirective &S)
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType)
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
void initFullExprCleanupWithFlag(RawAddress ActiveFlag)
llvm::Value * EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
llvm::CanonicalLoopInfo * EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth)
Emit the Stmt S and return its topmost canonical loop, if any.
llvm::Value * EmitRISCVCpuSupports(ArrayRef< StringRef > FeaturesStrs)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
llvm::Value * LoadObjCSelf()
LoadObjCSelf - Load the value of self.
bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD)
Returns whether we should perform a type checked load when loading a virtual function for virtual cal...
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO, bool IsVolatile, bool isInit)
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
void EmitARCCopyWeak(Address dst, Address src)
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc)
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void PushDestructorCleanup(QualType T, Address Addr)
PushDestructorCleanup - Push a cleanup to call the complete-object destructor of an object of the giv...
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, AggValueSlot ThisAVS, const CXXConstructExpr *E)
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD, CXXDtorType Type, const CXXRecordDecl *RD)
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
bool ShouldSkipSanitizerInstrumentation()
ShouldSkipSanitizerInstrumentation - Return true if the current function should not be instrumented w...
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
llvm::Value * EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty)
llvm::Value * EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
llvm::Value * EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty, const llvm::CmpInst::Predicate Fp, const llvm::CmpInst::Predicate Ip, const llvm::Twine &Name="")
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum, llvm::Value *ptr)
void defaultInitNonTrivialCStructVar(LValue Dst)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
llvm::ScalableVectorType * getSVEType(const SVETypeFlags &TypeFlags)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke=nullptr, bool IsMustTail=false)
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
void EmitSwitchStmt(const SwitchStmt &S)
LValue EmitLValueForLambdaField(const FieldDecl *Field, llvm::Value *ThisValue)
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF, Address ParentVar, llvm::Value *ParentFP)
Recovers the address of a local in a parent function.
const FieldDecl * FindFlexibleArrayMemberFieldAndOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FAMDecl, uint64_t &Offset)
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn, llvm::Constant *addr)
Call atexit() with a function that passes the given argument to the given function.
llvm::Value * EmitRISCVCpuIs(const CallExpr *E)
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
Address EmitVAListRef(const Expr *E)
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info, bool followForward, const llvm::Twine &name)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
llvm::Value * EmitLoadOfScalar(LValue lvalue, SourceLocation Loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
void EmitSEHTryStmt(const SEHTryStmt &S)
void maybeCreateMCDCCondBitmap()
Allocate a temp value on the stack that MCDC can use to track condition results.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
llvm::Value * EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty, bool negateForRightShift)
void ExitSEHTryStmt(const SEHTryStmt &S)
llvm::Constant * GenerateCopyHelperFunction(const CGBlockInfo &blockInfo)
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind)
Emit final update of reduction values to original variables at the end of the directive.
llvm::Value * unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub)
Call unatexit() with function dtorStub.
SmallVector< llvm::IntrinsicInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
llvm::BasicBlock * OMPScanDispatch
llvm::BasicBlock * getTerminateLandingPad()
getTerminateLandingPad - Return a landing pad that just calls terminate.
llvm::BasicBlock * getTerminateHandler()
getTerminateHandler - Return a handler (not a landing pad, just a catch handler) that just calls term...
void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr)
llvm::Value * EmitSVEMovl(const SVETypeFlags &TypeFlags, llvm::ArrayRef< llvm::Value * > Ops, unsigned BuiltinID)
llvm::function_ref< std::pair< LValue, LValue >(CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy
llvm::Value * EmitARCRetainAutoreleaseScalarExpr(const Expr *expr)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
void setBeforeOutermostConditional(llvm::Value *value, Address addr, CodeGenFunction &CGF)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase, bool Delegating, CallArgList &Args)
Emit a call to an inheriting constructor (that is, one that invokes a constructor inherited from a ba...
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
llvm::Type * getEltType(const SVETypeFlags &TypeFlags)
CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false)
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, OMPTargetDataInfo &InputInfo)
void EmitDeclStmt(const DeclStmt &S)
void EmitOMPScopeDirective(const OMPScopeDirective &S)
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock=false)
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor)
llvm::Function * GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info, const DeclMapTy &ldm, bool IsLambdaConversionToBlock, bool BuildGlobalBlock)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
static void EmitOMPTargetParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForDirective &S)
Emit device code for the target parallel for directive.
llvm::Value * EmitSVEPMull(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned BuiltinID)
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void EmitCoroutineBody(const CoroutineBodyStmt &S)
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, Address This, CallArgList &Args, AggValueSlot::Overlap_t Overlap, SourceLocation Loc, bool NewPointerIsChecked, llvm::CallBase **CallOrInvoke=nullptr)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy, QualType RTy)
void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind, RawAddress ActiveFlag, As... A)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub)
Call atexit() with function dtorStub.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
void EmitOMPSimdInit(const OMPLoopDirective &D)
Helpers for the OpenMP loop directives.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
llvm::Value * EmitSVEScatterStore(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
void EmitConstructorBody(FunctionArgList &Args)
void SetFastMathFlags(FPOptions FPFeatures)
Set the codegen fast-math flags.
int ExpectedOMPLoopDepth
Number of nested loop to be consumed by the last surrounding loop-associated directive.
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
llvm::Value * EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD, NestedNameSpecifier *Qual, llvm::Type *Ty)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This)
Emit assumption load for all bases.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::Constant * GenerateObjCAtomicSetterCopyHelperFunction(const ObjCPropertyImplDecl *PID)
ComplexPairTy EmitUnPromotedValue(ComplexPairTy result, QualType PromotionType)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeSimdDirective &S)
Emit device code for the target teams distribute simd directive.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
SmallVector< llvm::CanonicalLoopInfo *, 4 > OMPLoopNestStack
List of recently emitted OMPCanonicalLoops.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB, const CodeGenLoopBoundsTy &CodeGenLoopBounds, const CodeGenDispatchBoundsTy &CGDispatchBounds)
Emit code for the worksharing loop-based directive.
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
llvm::Value * LoadCXXVTT()
LoadCXXVTT - Load the VTT parameter to base constructors/destructors have virtual bases.
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void EmitOMPLinearClause(const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope)
Emit initial code for linear clauses.
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
void StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo, bool IsUnprototyped)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr)
LValue EmitMemberExpr(const MemberExpr *E)
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfBaseOfCompleteClass - Convert the given pointer to a complete class to the given direct b...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
llvm::Value * SEHInfo
Value returned by __exception_info intrinsic.
llvm::Value * BuildVector(ArrayRef< llvm::Value * > Ops)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
ConstantEmission tryEmitAsConstant(const MemberExpr *ME)
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
void EmitAutoVarCleanups(const AutoVarEmission &emission)
llvm::GlobalVariable * AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV)
AddInitializerToStaticVarDecl - Add the initializer for 'D' to the global variable that has already b...
llvm::Value * EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void EmitOMPTileDirective(const OMPTileDirective &S)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
bool EmitOMPLinearClauseInit(const OMPLoopDirective &D)
Emit initial code for linear variables.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
llvm::BasicBlock * EmitLandingPad()
Emits a landing pad for the current EH stack.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, llvm::Value *VTable, SourceLocation Loc)
If whole-program virtual table optimization is enabled, emit an assumption that VTable is a member of...
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit)
Helper for the OpenMP loop directives.
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
VlaSizePair getVLASize(QualType vla)
llvm::Value * EmitSVEPredicateTupleCast(llvm::Value *PredTuple, llvm::StructType *Ty)
llvm::Value * EmitObjCMRRAutoreleasePoolPush()
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size)
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitOMPLinearClauseFinal(const OMPLoopDirective &D, const llvm::function_ref< llvm::Value *(CodeGenFunction &)> CondGen)
Emit final code for linear clauses.
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitWritebacks(const CallArgList &Args)
EmitWriteback - Emit callbacks for function.
void EmitARCInitWeak(Address addr, llvm::Value *value)
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
llvm::BasicBlock * OMPScanExitBlock
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Emit device code for the target teams distribute parallel for simd directive.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
llvm::Value * EmitRISCVCpuIs(StringRef CPUStr)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr, RawAddress *Alloca=nullptr)
void EmitOMPUseDeviceAddrClause(const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap)
void generateObjCSetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, llvm::Constant *AtomicHelperFn)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
llvm::Value * EmitSMEReadWrite(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::Value * EmitSMELd1St1(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
ActivateCleanupBlock - Activates an initially-inactive cleanup.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, size_t OldLifetimeExtendedStackSize, std::initializer_list< llvm::Value ** > ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added,...
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D, llvm::GlobalVariable *Addr, bool PerformInit)
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
LValue EmitStringLiteralLValue(const StringLiteral *E)
void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt, bool IsFilter)
Scan the outlined statement for captures from the parent function.
static Destroyer destroyARCStrongPrecise
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type, FunctionArgList &Args)
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc, llvm::AtomicOrdering AO, bool IsVolatile=false, AggValueSlot slot=AggValueSlot::ignored())
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitSVEStructLoad(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD, CallArgList &CallArgs)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
LValue EmitOMPSharedLValue(const Expr *E)
Emits the lvalue for the expression with possibly captured variable.
void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address)
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
void processInReduction(const OMPExecutableDirective &S, OMPTaskDataTy &Data, CodeGenFunction &CGF, const CapturedStmt *CS, OMPPrivateScope &Scope)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters.
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
void pushStackRestore(CleanupKind kind, Address SPMem)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
llvm::Value * EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt, llvm::Type *Ty, bool usgn, const char *name)
void GenerateObjCSetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID)
GenerateObjCSetter - Synthesize an Objective-C property setter function for the given property.
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
bool EmitOMPCopyinClause(const OMPExecutableDirective &D)
Emit code for copyin clause in D directive.
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
SmallVector< llvm::Type *, 2 > getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType, ArrayRef< llvm::Value * > Ops)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPPrivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::Function * GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, SourceLocation Loc)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isNontemporal=false)
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr)
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
static void EmitOMPTargetParallelGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelGenericLoopDirective &S)
Emit device code for the target parallel loop directive.
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::function_ref< void(CodeGenFunction &, SourceLocation, const unsigned, const bool)> CodeGenOrderedTy
llvm::Value * EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
void GenerateObjCGetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID)
GenerateObjCGetter - Synthesize an Objective-C property getter function.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
const CGFunctionInfo * CurFnInfo
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
llvm::Value * EmitSVEStructStore(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::BasicBlock * getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope)
void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr)
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
void pushKmpcAllocFree(CleanupKind Kind, std::pair< llvm::Value *, llvm::Value * > AddrSizePair)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
llvm::Value * EmitSEHAbnormalTermination()
void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond if false.
void EmitCoreturnStmt(const CoreturnStmt &S)
void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type)
EnterDtorCleanups - Enter the cleanups necessary to complete the given phase of destruction for a des...
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
llvm::BasicBlock * OMPAfterScanBlock
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
void EmitOMPErrorDirective(const OMPErrorDirective &S)
static Destroyer destroyARCStrongImprecise
void EmitOMPSectionDirective(const OMPSectionDirective &S)
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
static void EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelDirective &S)
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitSVEAllTruePred(const SVETypeFlags &TypeFlags)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock=false)
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
llvm::Value * EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
llvm::Instruction * CurrentFuncletPad
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
llvm::Type * SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags)
SVEBuiltinMemEltTy - Returns the memory element type for this memory access builtin.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst, const CallExpr *E)
bool checkIfFunctionMustProgress()
Returns true if a function must make progress, which means the mustprogress attribute can be added.
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo)
Emits the alloca and debug information for the size expressions for each dimension of an array.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
llvm::SmallVector< VPtr, 4 > VPtrsVector
llvm::Value * EmitSMEZero(const SVETypeFlags &TypeFlags, llvm::SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::Value * getSelectorFromSlot()
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope)
Emit initial code for lastprivate variables.
static std::string getNonTrivialCopyConstructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx)
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl)
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
llvm::Value * EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, const char *NameHint, unsigned Modifier, const CallExpr *E, SmallVectorImpl< llvm::Value * > &Ops, Address PtrOp0, Address PtrOp1, llvm::Triple::ArchType Arch)
void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, bool BaseIsNonVirtualPrimaryBase, const CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs)
llvm::function_ref< void(CodeGenFunction &, const OMPLoopDirective &, JumpDest)> CodeGenLoopTy
void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags, bool CanThrow)
llvm::Value * EmitNeonCall(llvm::Function *F, SmallVectorImpl< llvm::Value * > &O, const char *name, unsigned shift=0, bool rightshift=false)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void InitializeVTablePointer(const VPtr &vptr)
Initialize the vtable pointer of the given subobject.
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Value *Chain=nullptr, llvm::CallBase **CallOrInvoke=nullptr, CGFunctionInfo const **ResolvedFnInfo=nullptr)
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
Address emitAddrOfRealComponent(Address complex, QualType complexType)
void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
llvm::ScalableVectorType * getSVEPredType(const SVETypeFlags &TypeFlags)
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * getExceptionFromSlot()
Returns the contents of the function's exception object and selector slots.
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
llvm::Value * EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags, SmallVectorImpl< llvm::Value * > &Ops, unsigned IntID)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef< Address > args, const Twine &name="")
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
RValue EmitAtomicExpr(AtomicExpr *E)
RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema, llvm::Value *StorageAddress, GlobalDecl SchemaDecl, QualType SchemaType)
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit, llvm::BasicBlock *InitBlock, llvm::BasicBlock *NoInitBlock, GuardKind Kind, const VarDecl *D)
Emit a branch to select whether or not to perform guarded initialization.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
CallType * addControlledConvergenceToken(CallType *Input)
LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy, AlignmentSource Source=AlignmentSource::Type)
std::pair< bool, RValue > EmitOMPAtomicSimpleUpdateExpr(LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, llvm::AtomicOrdering AO, SourceLocation Loc, const llvm::function_ref< RValue(RValue)> CommonGen)
Emit atomic update code for constructs: X = X BO E or X = E BO E.
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr, llvm::FunctionCallee Callee)
Emit a musttail call for a thunk with a potentially adjusted this pointer.
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
llvm::Value * EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags, ArrayRef< llvm::Value * > Ops)
llvm::Type * ConvertType(const TypeDecl *T)
This class organizes the cross-function state that is used while generating LLVM code.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
const llvm::DataLayout & getDataLayout() const
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
static ConstantAddress invalid()
Definition: Address.h:302
DominatingValue< Address >::saved_type AggregateAddr
static saved_type save(CodeGenFunction &CGF, RValue value)
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:141
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:203
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
A stack of scopes which respond to exceptions, including cleanups and catch blocks.
Definition: EHScopeStack.h:94
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
void pushCleanupTuple(CleanupKind Kind, std::tuple< As... > A)
Push a lazily-created cleanup on the stack. Tuple version.
Definition: EHScopeStack.h:295
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
LValue - This represents an lvalue references.
Definition: CGValue.h:182
CharUnits getAlignment() const
Definition: CGValue.h:343
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
QualType getType() const
Definition: CGValue.h:291
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
A stack of loop information corresponding to loop nesting levels.
Definition: CGLoopInfo.h:204
Information used when generating a structured loop.
Definition: CGLoopInfo.h:90
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
An abstract representation of an aligned address.
Definition: Address.h:42
static RawAddress invalid()
Definition: Address.h:61
bool isValid() const
Definition: Address.h:62
Class provides a way to call simple version of codegen for OpenMP region, or an advanced with possibl...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition: TargetInfo.h:47
The class detects jumps which bypass local variables declaration: goto L; int a; L:
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4171
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3477
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1628
ContinueStmt - This represents a continue.
Definition: Stmt.h:2977
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition: StmtCXX.h:473
Represents the body of a coroutine.
Definition: StmtCXX.h:320
Represents an expression that might suspend coroutine execution; either a co_await or co_yield expres...
Definition: ExprCXX.h:5077
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5272
Represents the current source location and context used to determine the value of the source location...
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2369
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1519
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2752
This represents one expression.
Definition: Expr.h:110
QualType getType() const
Definition: Expr.h:142
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6354
Represents a member of a struct/union/class.
Definition: Decl.h:3033
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2808
Represents a function declaration or definition.
Definition: Decl.h:1935
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5102
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2889
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7152
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2165
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2928
Describes an C or C++ initializer list.
Definition: Expr.h:5088
Represents the declaration of a label.
Definition: Decl.h:503
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2058
FPExceptionModeKind
Possible floating point exception behavior.
Definition: LangOptions.h:287
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:499
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4734
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2796
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3236
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3519
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
This represents '#pragma omp atomic' directive.
Definition: StmtOpenMP.h:2947
This represents '#pragma omp barrier' directive.
Definition: StmtOpenMP.h:2625
This represents '#pragma omp cancel' directive.
Definition: StmtOpenMP.h:3655
This represents '#pragma omp cancellation point' directive.
Definition: StmtOpenMP.h:3597
Representation of an OpenMP canonical loop.
Definition: StmtOpenMP.h:142
This represents '#pragma omp critical' directive.
Definition: StmtOpenMP.h:2076
This represents '#pragma omp depobj' directive.
Definition: StmtOpenMP.h:2841
This represents '#pragma omp distribute' directive.
Definition: StmtOpenMP.h:4425
This represents '#pragma omp distribute parallel for' composite directive.
Definition: StmtOpenMP.h:4547
This represents '#pragma omp distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:4643
This represents '#pragma omp distribute simd' composite directive.
Definition: StmtOpenMP.h:4708
This represents '#pragma omp error' directive.
Definition: StmtOpenMP.h:6432
This is a basic class for representing single OpenMP executable directive.
Definition: StmtOpenMP.h:266
This represents '#pragma omp flush' directive.
Definition: StmtOpenMP.h:2789
This represents '#pragma omp for' directive.
Definition: StmtOpenMP.h:1634
This represents '#pragma omp for simd' directive.
Definition: StmtOpenMP.h:1724
This represents '#pragma omp loop' directive.
Definition: StmtOpenMP.h:6103
Represents the '#pragma omp interchange' loop transformation directive.
Definition: StmtOpenMP.h:5769
This represents '#pragma omp interop' directive.
Definition: StmtOpenMP.h:5895
This is a common base class for loop directives ('omp simd', 'omp for', 'omp for simd' etc....
Definition: StmtOpenMP.h:1004
This represents '#pragma omp masked' directive.
Definition: StmtOpenMP.h:6013
This represents '#pragma omp master' directive.
Definition: StmtOpenMP.h:2028
This represents '#pragma omp master taskloop' directive.
Definition: StmtOpenMP.h:3854
This represents '#pragma omp master taskloop simd' directive.
Definition: StmtOpenMP.h:4006
This represents '#pragma omp metadirective' directive.
Definition: StmtOpenMP.h:6064
This represents '#pragma omp ordered' directive.
Definition: StmtOpenMP.h:2893
This represents '#pragma omp parallel' directive.
Definition: StmtOpenMP.h:612
This represents '#pragma omp parallel for' directive.
Definition: StmtOpenMP.h:2147
This represents '#pragma omp parallel for simd' directive.
Definition: StmtOpenMP.h:2244
This represents '#pragma omp parallel masked' directive.
Definition: StmtOpenMP.h:2372
This represents '#pragma omp parallel master' directive.
Definition: StmtOpenMP.h:2309
This represents '#pragma omp parallel master taskloop' directive.
Definition: StmtOpenMP.h:4137
This represents '#pragma omp parallel master taskloop simd' directive.
Definition: StmtOpenMP.h:4293
This represents '#pragma omp parallel sections' directive.
Definition: StmtOpenMP.h:2436
Represents the '#pragma omp reverse' loop transformation directive.
Definition: StmtOpenMP.h:5704
This represents '#pragma omp scan' directive.
Definition: StmtOpenMP.h:5842
This represents '#pragma omp scope' directive.
Definition: StmtOpenMP.h:1925
This represents '#pragma omp section' directive.
Definition: StmtOpenMP.h:1864
This represents '#pragma omp sections' directive.
Definition: StmtOpenMP.h:1787
This represents '#pragma omp simd' directive.
Definition: StmtOpenMP.h:1571
This represents '#pragma omp single' directive.
Definition: StmtOpenMP.h:1977
This represents '#pragma omp target data' directive.
Definition: StmtOpenMP.h:3206
This represents '#pragma omp target' directive.
Definition: StmtOpenMP.h:3152
This represents '#pragma omp target enter data' directive.
Definition: StmtOpenMP.h:3260
This represents '#pragma omp target exit data' directive.
Definition: StmtOpenMP.h:3315
This represents '#pragma omp target parallel' directive.
Definition: StmtOpenMP.h:3369
This represents '#pragma omp target parallel for' directive.
Definition: StmtOpenMP.h:3449
This represents '#pragma omp target parallel for simd' directive.
Definition: StmtOpenMP.h:4774
This represents '#pragma omp target parallel loop' directive.
Definition: StmtOpenMP.h:6370
This represents '#pragma omp target simd' directive.
Definition: StmtOpenMP.h:4841
This represents '#pragma omp target teams' directive.
Definition: StmtOpenMP.h:5199
This represents '#pragma omp target teams distribute' combined directive.
Definition: StmtOpenMP.h:5255
This represents '#pragma omp target teams distribute parallel for' combined directive.
Definition: StmtOpenMP.h:5322
This represents '#pragma omp target teams distribute parallel for simd' combined directive.
Definition: StmtOpenMP.h:5420
This represents '#pragma omp target teams distribute simd' combined directive.
Definition: StmtOpenMP.h:5490
This represents '#pragma omp target teams loop' directive.
Definition: StmtOpenMP.h:6230
This represents '#pragma omp target update' directive.
Definition: StmtOpenMP.h:4491
This represents '#pragma omp task' directive.
Definition: StmtOpenMP.h:2517
This represents '#pragma omp taskloop' directive.
Definition: StmtOpenMP.h:3715
This represents '#pragma omp taskloop simd' directive.
Definition: StmtOpenMP.h:3788
This represents '#pragma omp taskgroup' directive.
Definition: StmtOpenMP.h:2722
This represents '#pragma omp taskwait' directive.
Definition: StmtOpenMP.h:2671
This represents '#pragma omp taskyield' directive.
Definition: StmtOpenMP.h:2579
This represents '#pragma omp teams' directive.
Definition: StmtOpenMP.h:3544
This represents '#pragma omp teams distribute' directive.
Definition: StmtOpenMP.h:4906
This represents '#pragma omp teams distribute parallel for' composite directive.
Definition: StmtOpenMP.h:5106
This represents '#pragma omp teams distribute parallel for simd' composite directive.
Definition: StmtOpenMP.h:5040
This represents '#pragma omp teams distribute simd' combined directive.
Definition: StmtOpenMP.h:4972
This represents '#pragma omp teams loop' directive.
Definition: StmtOpenMP.h:6165
This represents the '#pragma omp tile' loop transformation directive.
Definition: StmtOpenMP.h:5548
This represents the '#pragma omp unroll' loop transformation directive.
Definition: StmtOpenMP.h:5630
This represents clause 'use_device_addr' in the '#pragma omp ...' directives.
This represents clause 'use_device_ptr' in the '#pragma omp ...' directives.
ObjCArrayLiteral - used for objective-c array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:191
Represents Objective-C's @synchronized statement.
Definition: StmtObjC.h:303
Represents Objective-C's @throw statement.
Definition: StmtObjC.h:358
Represents Objective-C's @try ... @catch ... @finally statement.
Definition: StmtObjC.h:167
Represents Objective-C's @autoreleasepool Statement.
Definition: StmtObjC.h:394
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:127
ObjCContainerDecl - Represents a container for method declarations.
Definition: DeclObjC.h:947
ObjCDictionaryLiteral - AST node to represent objective-c dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:309
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents Objective-C's collection statement.
Definition: StmtObjC.h:23
ObjCImplementationDecl - Represents a class definition - this is where method definitions are specifi...
Definition: DeclObjC.h:2596
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1487
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:941
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:140
ObjCPropertyImplDecl - Represents implementation declaration of a property in a class or category imp...
Definition: DeclObjC.h:2804
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:505
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:51
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Definition: StmtOpenACC.h:131
This class represents a 'loop' construct.
Definition: StmtOpenACC.h:194
Represents a parameter to a function.
Definition: Decl.h:1725
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3198
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1991
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6546
A (possibly-)qualified type.
Definition: Type.h:929
@ DK_cxx_destructor
Definition: Type.h:1521
@ DK_nontrivial_c_struct
Definition: Type.h:1524
@ DK_objc_weak_lifetime
Definition: Type.h:1523
@ DK_objc_strong_lifetime
Definition: Type.h:1522
The collection of all-type qualifiers we support.
Definition: Type.h:324
Represents a struct/union/class.
Definition: Decl.h:4148
bool hasVolatileMember() const
Definition: Decl.h:4211
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6072
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3046
Represents a __leave statement.
Definition: Stmt.h:3745
Flags to identify the types for overloaded SVE builtins.
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
A trivial tuple used to represent a source range.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
Stmt - This represents one statement.
Definition: Stmt.h:84
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1323
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1325
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2415
Exposes information about the current target.
Definition: TargetInfo.h:220
Represents a declaration of a type.
Definition: Decl.h:3370
The base class of the type hierarchy.
Definition: Type.h:1828
bool isReferenceType() const
Definition: Type.h:8204
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8731
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2232
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4750
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:671
QualType getType() const
Definition: Decl.h:682
Represents a variable declaration or definition.
Definition: Decl.h:882
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition: Decl.cpp:2246
bool isLocalVarDeclOrParm() const
Similar to isLocalVarDecl but also includes parameters.
Definition: Decl.h:1213
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3808
Expr * getSizeExpr() const
Definition: Type.h:3827
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2611
Defines the clang::TargetInfo interface.
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< ComplexType > complexType
Matches C99 complex types.
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
Definition: ABI.h:24
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
Definition: OpenMPKinds.h:25
BinaryOperatorKind
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ CR_Default
Definition: CapturedStmt.h:17
OpenMPDistScheduleClauseKind
OpenMP attributes for 'dist_schedule' clause.
Definition: OpenMPKinds.h:104
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
@ Result
The result type of a method or function.
CXXDtorType
C++ destructor types.
Definition: ABI.h:33
const FunctionProtoType * T
@ Success
Template argument deduction was successful.
llvm::fp::ExceptionBehavior ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
#define true
Definition: stdbool.h:25
#define false
Definition: stdbool.h:26
Structure with information about how a bitfield should be accessed.
llvm::SmallVector< llvm::AllocaInst * > Take()
CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
FMVResolverOption(llvm::Function *F, ArrayRef< StringRef > Feats, std::optional< StringRef > Arch=std::nullopt)
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth, unsigned Index)
Header for data within LifetimeExtendedCleanupStack.
unsigned Size
The size of the following cleanup object.
unsigned IsConditional
Whether this is a conditional cleanup.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::OpenMPIRBuilder::InsertPointTy InsertPointTy
static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region that will be outlined in OpenMPIRBuilder::finalize().
static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable /p VD.
static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP, llvm::BasicBlock &FiniBB, llvm::Function *Fn, ArrayRef< llvm::Value * > Args)
static std::string getNameWithSeparators(ArrayRef< StringRef > Parts, StringRef FirstSeparator=".", StringRef Separator=".")
Get the platform-specific name separator.
static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP)
Emit the Finalization for an OMP region.
static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Twine RegionName)
Emit the body of an OMP region.
OMPBuilderCBHelpers & operator=(const OMPBuilderCBHelpers &)=delete
OMPBuilderCBHelpers(const OMPBuilderCBHelpers &)=delete
OMPTargetDataInfo(Address BasePointersArray, Address PointersArray, Address SizesArray, Address MappersArray, unsigned NumberOfTargetItems)
llvm::PointerUnion< const FunctionProtoType *, const ObjCMethodDecl * > P
Struct with all information about dynamic [sub]class needed to set vptr.
This structure provides a set of types that are commonly used during IR emission.
Helper class with most of the code for saving a value for a conditional expression cleanup.
static llvm::Value * restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, llvm::Value *value)
static bool needsSaving(llvm::Value *value)
Answer whether the given value needs extra work to be saved.
llvm::PointerIntPair< llvm::Value *, 1, bool > saved_type
static type restore(CodeGenFunction &CGF, saved_type value)
static type restore(CodeGenFunction &CGF, saved_type value)
static saved_type save(CodeGenFunction &CGF, type value)
static saved_type save(CodeGenFunction &CGF, type value)
static type restore(CodeGenFunction &CGF, saved_type value)
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function.
Definition: EHScopeStack.h:65
static saved_type save(CodeGenFunction &CGF, type value)
Definition: EHScopeStack.h:59
Scheduling data for loop-based OpenMP directives.
Definition: OpenMPKinds.h:180
The this pointer adjustment as well as an optional return adjustment for a thunk.
Definition: Thunk.h:157