CGDecl.cpp
1//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Decl nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBlocks.h"
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGDebugInfo.h"
17#include "CGOpenCLRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CodeGenFunction.h"
20#include "CodeGenModule.h"
21#include "ConstantEmitter.h"
22#include "EHScopeStack.h"
23#include "PatternInit.h"
24#include "TargetInfo.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/CharUnits.h"
28#include "clang/AST/Decl.h"
29#include "clang/AST/DeclObjC.h"
34#include "clang/Sema/Sema.h"
35#include "llvm/Analysis/ConstantFolding.h"
36#include "llvm/Analysis/ValueTracking.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/GlobalVariable.h"
39#include "llvm/IR/Instructions.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Type.h"
42#include <optional>
43
44using namespace clang;
45using namespace CodeGen;
46
47static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
48 "Clang max alignment greater than what LLVM supports?");
49
50void CodeGenFunction::EmitDecl(const Decl &D) {
51 switch (D.getKind()) {
52 case Decl::BuiltinTemplate:
53 case Decl::TranslationUnit:
54 case Decl::ExternCContext:
55 case Decl::Namespace:
56 case Decl::UnresolvedUsingTypename:
57 case Decl::ClassTemplateSpecialization:
58 case Decl::ClassTemplatePartialSpecialization:
59 case Decl::VarTemplateSpecialization:
60 case Decl::VarTemplatePartialSpecialization:
61 case Decl::TemplateTypeParm:
62 case Decl::UnresolvedUsingValue:
63 case Decl::NonTypeTemplateParm:
64 case Decl::CXXDeductionGuide:
65 case Decl::CXXMethod:
66 case Decl::CXXConstructor:
67 case Decl::CXXDestructor:
68 case Decl::CXXConversion:
69 case Decl::Field:
70 case Decl::MSProperty:
71 case Decl::IndirectField:
72 case Decl::ObjCIvar:
73 case Decl::ObjCAtDefsField:
74 case Decl::ParmVar:
75 case Decl::ImplicitParam:
76 case Decl::ClassTemplate:
77 case Decl::VarTemplate:
78 case Decl::FunctionTemplate:
79 case Decl::TypeAliasTemplate:
80 case Decl::TemplateTemplateParm:
81 case Decl::ObjCMethod:
82 case Decl::ObjCCategory:
83 case Decl::ObjCProtocol:
84 case Decl::ObjCInterface:
85 case Decl::ObjCCategoryImpl:
86 case Decl::ObjCImplementation:
87 case Decl::ObjCProperty:
88 case Decl::ObjCCompatibleAlias:
89 case Decl::PragmaComment:
90 case Decl::PragmaDetectMismatch:
91 case Decl::AccessSpec:
92 case Decl::LinkageSpec:
93 case Decl::Export:
94 case Decl::ObjCPropertyImpl:
95 case Decl::FileScopeAsm:
96 case Decl::TopLevelStmt:
97 case Decl::Friend:
98 case Decl::FriendTemplate:
99 case Decl::Block:
100 case Decl::Captured:
101 case Decl::UsingShadow:
102 case Decl::ConstructorUsingShadow:
103 case Decl::ObjCTypeParam:
104 case Decl::Binding:
105 case Decl::UnresolvedUsingIfExists:
106 case Decl::HLSLBuffer:
107 llvm_unreachable("Declaration should not be in declstmts!");
108 case Decl::Record: // struct/union/class X;
109 case Decl::CXXRecord: // struct/union/class X; [C++]
110 if (CGDebugInfo *DI = getDebugInfo())
111 if (cast<RecordDecl>(D).getDefinition())
112 DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
113 return;
114 case Decl::Enum: // enum X;
115 if (CGDebugInfo *DI = getDebugInfo())
116 if (cast<EnumDecl>(D).getDefinition())
117 DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
118 return;
119 case Decl::Function: // void X();
120 case Decl::EnumConstant: // enum ? { X = ? }
121 case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
122 case Decl::Label: // __label__ x;
123 case Decl::Import:
124 case Decl::MSGuid: // __declspec(uuid("..."))
125 case Decl::UnnamedGlobalConstant:
126 case Decl::TemplateParamObject:
127 case Decl::OMPThreadPrivate:
128 case Decl::OMPAllocate:
129 case Decl::OMPCapturedExpr:
130 case Decl::OMPRequires:
131 case Decl::Empty:
132 case Decl::Concept:
133 case Decl::ImplicitConceptSpecialization:
134 case Decl::LifetimeExtendedTemporary:
135 case Decl::RequiresExprBody:
136 // None of these decls require codegen support.
137 return;
138
139 case Decl::NamespaceAlias:
140 if (CGDebugInfo *DI = getDebugInfo())
141 DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
142 return;
143 case Decl::Using: // using X; [C++]
144 if (CGDebugInfo *DI = getDebugInfo())
145 DI->EmitUsingDecl(cast<UsingDecl>(D));
146 return;
147 case Decl::UsingEnum: // using enum X; [C++]
148 if (CGDebugInfo *DI = getDebugInfo())
149 DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
150 return;
151 case Decl::UsingPack:
152 for (auto *Using : cast<UsingPackDecl>(D).expansions())
153 EmitDecl(*Using);
154 return;
155 case Decl::UsingDirective: // using namespace X; [C++]
156 if (CGDebugInfo *DI = getDebugInfo())
157 DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
158 return;
159 case Decl::Var:
160 case Decl::Decomposition: {
161 const VarDecl &VD = cast<VarDecl>(D);
162 assert(VD.isLocalVarDecl() &&
163 "Should not see file-scope variables inside a function!");
164 EmitVarDecl(VD);
165 if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
166 for (auto *B : DD->bindings())
167 if (auto *HD = B->getHoldingVar())
168 EmitVarDecl(*HD);
169 return;
170 }
171
172 case Decl::OMPDeclareReduction:
173 return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);
174
175 case Decl::OMPDeclareMapper:
176 return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);
177
178 case Decl::Typedef: // typedef int X;
179 case Decl::TypeAlias: { // using X = int; [C++0x]
180 QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
181 if (CGDebugInfo *DI = getDebugInfo())
182 DI->EmitAndRetainType(Ty);
183 if (Ty->isVariablyModifiedType())
184 EmitVariablyModifiedType(Ty);
185 return;
186 }
187 }
188}
189
190/// EmitVarDecl - This method handles emission of any variable declaration
191/// inside a function, including static vars etc.
192void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
193 if (D.hasExternalStorage())
194 // Don't emit it now, allow it to be emitted lazily on its first use.
195 return;
196
197 // Some function-scope variable does not have static storage but still
198 // needs to be emitted like a static variable, e.g. a function-scope
199 // variable in constant address space in OpenCL.
200 if (D.getStorageDuration() != SD_Automatic) {
201 // Static sampler variables are translated to function calls.
202 if (D.getType()->isSamplerT())
203 return;
204
205 llvm::GlobalValue::LinkageTypes Linkage =
206 CGM.getLLVMLinkageVarDefinition(&D);
207
208 // FIXME: We need to force the emission/use of a guard variable for
209 // some variables even if we can constant-evaluate them because
210 // we can't guarantee every translation unit will constant-evaluate them.
211
212 return EmitStaticVarDecl(D, Linkage);
213 }
214
215 if (D.getType().getAddressSpace() == LangAS::opencl_local)
216 return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
217
218 assert(D.hasLocalStorage());
219 return EmitAutoVarDecl(D);
220}
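// Illustrative sketch (invented example, not from the file) of the cases the
// dispatch above separates:
//
//   void f(void) {
//     extern int e;      // external storage: nothing emitted until first use
//     static int s = 1;  // non-automatic duration: EmitStaticVarDecl
//     int a = 2;         // automatic local: EmitAutoVarDecl
//   }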
221
222static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
223 if (CGM.getLangOpts().CPlusPlus)
224 return CGM.getMangledName(&D).str();
225
226 // If this isn't C++, we don't need a mangled name, just a pretty one.
227 assert(!D.isExternallyVisible() && "name shouldn't matter");
228 std::string ContextName;
229 const DeclContext *DC = D.getDeclContext();
230 if (auto *CD = dyn_cast<CapturedDecl>(DC))
231 DC = cast<DeclContext>(CD->getNonClosureContext());
232 if (const auto *FD = dyn_cast<FunctionDecl>(DC))
233 ContextName = std::string(CGM.getMangledName(FD));
234 else if (const auto *BD = dyn_cast<BlockDecl>(DC))
235 ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
236 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
237 ContextName = OMD->getSelector().getAsString();
238 else
239 llvm_unreachable("Unknown context for static var decl");
240
241 ContextName += "." + D.getNameAsString();
242 return ContextName;
243}
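// For illustration (assumed example): in C a static local gets a
// "<context>.<variable>" name, while C++ just uses the mangled name.
//
//   /* C translation unit */
//   void tick(void) { static int count; }   // emitted global named "tick.count"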
244
245llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
246 const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
247 // In general, we don't always emit static var decls once before we reference
248 // them. It is possible to reference them before emitting the function that
249 // contains them, and it is possible to emit the containing function multiple
250 // times.
251 if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
252 return ExistingGV;
253
254 QualType Ty = D.getType();
255 assert(Ty->isConstantSizeType() && "VLAs can't be static");
256
257 // Use the label if the variable is renamed with the asm-label extension.
258 std::string Name;
259 if (D.hasAttr<AsmLabelAttr>())
260 Name = std::string(getMangledName(&D));
261 else
262 Name = getStaticDeclName(*this, D);
263
264 llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
265 LangAS AS = GetGlobalVarAddressSpace(&D);
266 unsigned TargetAS = getContext().getTargetAddressSpace(AS);
267
268 // OpenCL variables in local address space and CUDA shared
269 // variables cannot have an initializer.
270 llvm::Constant *Init = nullptr;
271 if (Ty.getAddressSpace() == LangAS::opencl_local ||
272 D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
273 Init = llvm::UndefValue::get(LTy);
274 else
275 Init = EmitNullConstant(Ty);
276
277 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
278 getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
279 nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
280 GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());
281
282 if (supportsCOMDAT() && GV->isWeakForLinker())
283 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
284
285 if (D.getTLSKind())
286 setTLSMode(GV, D);
287
288 setGVProperties(GV, &D);
289 getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);
290
291 // Make sure the result is of the correct type.
292 LangAS ExpectedAS = Ty.getAddressSpace();
293 llvm::Constant *Addr = GV;
294 if (AS != ExpectedAS) {
295 Addr = getTargetCodeGenInfo().performAddrSpaceCast(
296 *this, GV, AS, ExpectedAS,
297 llvm::PointerType::get(getLLVMContext(),
298 getContext().getTargetAddressSpace(ExpectedAS)));
299 }
300
301 setStaticLocalDeclAddress(&D, Addr);
302
303 // Ensure that the static local gets initialized by making sure the parent
304 // function gets emitted eventually.
305 const Decl *DC = cast<Decl>(D.getDeclContext());
306
307 // We can't name blocks or captured statements directly, so try to emit their
308 // parents.
309 if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
310 DC = DC->getNonClosureContext();
311 // FIXME: Ensure that global blocks get emitted.
312 if (!DC)
313 return Addr;
314 }
315
316 GlobalDecl GD;
317 if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
318 GD = GlobalDecl(CD, Ctor_Base);
319 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
320 GD = GlobalDecl(DD, Dtor_Base);
321 else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
322 GD = GlobalDecl(FD);
323 else {
324 // Don't do anything for Obj-C method decls or global closures. We should
325 // never defer them.
326 assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
327 }
328 if (GD.getDecl()) {
329 // Disable emission of the parent function for the OpenMP device codegen.
330 CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
331 (void)GetAddrOfGlobal(GD);
332 }
333
334 return Addr;
335}
336
337/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
338/// global variable that has already been created for it. If the initializer
339/// has a different type than GV does, this may free GV and return a different
340/// one. Otherwise it just returns GV.
341llvm::GlobalVariable *
342CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
343 llvm::GlobalVariable *GV) {
344 ConstantEmitter emitter(*this);
345 llvm::Constant *Init = emitter.tryEmitForInitializer(D);
346
347 // If constant emission failed, then this should be a C++ static
348 // initializer.
349 if (!Init) {
350 if (!getLangOpts().CPlusPlus)
351 CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
352 else if (D.hasFlexibleArrayInit(getContext()))
353 CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
354 else if (HaveInsertPoint()) {
355 // Since we have a static initializer, this global variable can't
356 // be constant.
357 GV->setConstant(false);
358
359 EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
360 }
361 return GV;
362 }
363
364#ifndef NDEBUG
365 CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
366 D.getFlexibleArrayInitChars(getContext());
367 CharUnits CstSize = CharUnits::fromQuantity(
368 CGM.getDataLayout().getTypeAllocSize(Init->getType()));
369 assert(VarSize == CstSize && "Emitted constant has unexpected size");
370#endif
371
372 bool NeedsDtor =
373 D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
374
375 GV->setConstant(
376 D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
377 GV->replaceInitializer(Init);
378
379 emitter.finalize(GV);
380
381 if (NeedsDtor && HaveInsertPoint()) {
382 // We have a constant initializer, but a nontrivial destructor. We still
383 // need to perform a guarded "initialization" in order to register the
384 // destructor.
385 EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
386 }
387
388 return GV;
389}
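// Rough example (names invented) of the two paths above:
//
//   int seed();
//   void g() {
//     static int x = 42;      // constant emission succeeds: the initializer
//                             // is baked into the global
//     static int y = seed();  // constant emission fails: in C++ this becomes
//                             // a guarded dynamic initialization
//   }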
390
391void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
392 llvm::GlobalValue::LinkageTypes Linkage) {
393 // Check to see if we already have a global variable for this
394 // declaration. This can happen when double-emitting function
395 // bodies, e.g. with complete and base constructors.
396 llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
397 CharUnits alignment = getContext().getDeclAlign(&D);
398
399 // Store into LocalDeclMap before generating initializer to handle
400 // circular references.
401 llvm::Type *elemTy = ConvertTypeForMem(D.getType());
402 setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));
403
404 // We can't have a VLA here, but we can have a pointer to a VLA,
405 // even though that doesn't really make any sense.
406 // Make sure to evaluate VLA bounds now so that we have them for later.
407 if (D.getType()->isVariablyModifiedType())
408 EmitVariablyModifiedType(D.getType());
409
410 // Save the type in case adding the initializer forces a type change.
411 llvm::Type *expectedType = addr->getType();
412
413 llvm::GlobalVariable *var =
414 cast<llvm::GlobalVariable>(addr->stripPointerCasts());
415
416 // CUDA's local and local static __shared__ variables should not
417 // have any non-empty initializers. This is ensured by Sema.
418 // Whatever initializer such variable may have when it gets here is
419 // a no-op and should not be emitted.
420 bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
421 D.hasAttr<CUDASharedAttr>();
422 // If this value has an initializer, emit it.
423 if (D.getInit() && !isCudaSharedVar)
424 var = AddInitializerToStaticVarDecl(D, var);
425
426 var->setAlignment(alignment.getAsAlign());
427
428 if (D.hasAttr<AnnotateAttr>())
429 CGM.AddGlobalAnnotations(&D, var);
430
431 if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
432 var->addAttribute("bss-section", SA->getName());
433 if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
434 var->addAttribute("data-section", SA->getName());
435 if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
436 var->addAttribute("rodata-section", SA->getName());
437 if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
438 var->addAttribute("relro-section", SA->getName());
439
440 if (const SectionAttr *SA = D.getAttr<SectionAttr>())
441 var->setSection(SA->getName());
442
443 if (D.hasAttr<RetainAttr>())
444 CGM.addUsedGlobal(var);
445 else if (D.hasAttr<UsedAttr>())
446 CGM.addUsedOrCompilerUsedGlobal(var);
447
448 if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
449 CGM.addUsedOrCompilerUsedGlobal(var);
450
451 // We may have to cast the constant because of the initializer
452 // mismatch above.
453 //
454 // FIXME: It is really dangerous to store this in the map; if anyone
455 // RAUW's the GV uses of this constant will be invalid.
456 llvm::Constant *castedAddr =
457 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
458 LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
459 CGM.setStaticLocalDeclAddress(&D, castedAddr);
460
461 CGM.getSanitizerMetadata()->reportGlobal(var, D);
462
463 // Emit global variable debug descriptor for static vars.
464 CGDebugInfo *DI = getDebugInfo();
465 if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
466 DI->setLocation(D.getLocation());
467 DI->EmitGlobalVariable(var, &D);
468 }
469}
470
471namespace {
472 struct DestroyObject final : EHScopeStack::Cleanup {
473 DestroyObject(Address addr, QualType type,
474 CodeGenFunction::Destroyer *destroyer,
475 bool useEHCleanupForArray)
476 : addr(addr), type(type), destroyer(destroyer),
477 useEHCleanupForArray(useEHCleanupForArray) {}
478
479 Address addr;
480 QualType type;
481 CodeGenFunction::Destroyer *destroyer;
482 bool useEHCleanupForArray;
483
484 void Emit(CodeGenFunction &CGF, Flags flags) override {
485 // Don't use an EH cleanup recursively from an EH cleanup.
486 bool useEHCleanupForArray =
487 flags.isForNormalCleanup() && this->useEHCleanupForArray;
488
489 CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
490 }
491 };
492
493 template <class Derived>
494 struct DestroyNRVOVariable : EHScopeStack::Cleanup {
495 DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
496 : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
497
498 llvm::Value *NRVOFlag;
499 Address Loc;
500 QualType Ty;
501
502 void Emit(CodeGenFunction &CGF, Flags flags) override {
503 // Along the exceptions path we always execute the dtor.
504 bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
505
506 llvm::BasicBlock *SkipDtorBB = nullptr;
507 if (NRVO) {
508 // If we exited via NRVO, we skip the destructor call.
509 llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
510 SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
511 llvm::Value *DidNRVO =
512 CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
513 CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
514 CGF.EmitBlock(RunDtorBB);
515 }
516
517 static_cast<Derived *>(this)->emitDestructorCall(CGF);
518
519 if (NRVO) CGF.EmitBlock(SkipDtorBB);
520 }
521
522 virtual ~DestroyNRVOVariable() = default;
523 };
524
525 struct DestroyNRVOVariableCXX final
526 : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
527 DestroyNRVOVariableCXX(Address addr, QualType type,
528 const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
529 : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
530 Dtor(Dtor) {}
531
532 const CXXDestructorDecl *Dtor;
533
534 void emitDestructorCall(CodeGenFunction &CGF) {
535 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
536 /*ForVirtualBase=*/false,
537 /*Delegating=*/false, Loc, Ty);
538 }
539 };
540
541 struct DestroyNRVOVariableC final
542 : DestroyNRVOVariable<DestroyNRVOVariableC> {
543 DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
544 : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
545
546 void emitDestructorCall(CodeGenFunction &CGF) {
547 CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
548 }
549 };
550
551 struct CallStackRestore final : EHScopeStack::Cleanup {
552 Address Stack;
553 CallStackRestore(Address Stack) : Stack(Stack) {}
554 bool isRedundantBeforeReturn() override { return true; }
555 void Emit(CodeGenFunction &CGF, Flags flags) override {
556 llvm::Value *V = CGF.Builder.CreateLoad(Stack);
557 CGF.Builder.CreateStackRestore(V);
558 }
559 };
560
561 struct KmpcAllocFree final : EHScopeStack::Cleanup {
562 std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
563 KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
564 : AddrSizePair(AddrSizePair) {}
565 void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
566 auto &RT = CGF.CGM.getOpenMPRuntime();
567 RT.getKmpcFreeShared(CGF, AddrSizePair);
568 }
569 };
570
571 struct ExtendGCLifetime final : EHScopeStack::Cleanup {
572 const VarDecl &Var;
573 ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
574
575 void Emit(CodeGenFunction &CGF, Flags flags) override {
576 // Compute the address of the local variable, in case it's a
577 // byref or something.
578 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
579 Var.getType(), VK_LValue, SourceLocation());
580 llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
581 SourceLocation());
582 CGF.EmitExtendGCLifetime(value);
583 }
584 };
585
586 struct CallCleanupFunction final : EHScopeStack::Cleanup {
587 llvm::Constant *CleanupFn;
588 const CGFunctionInfo &FnInfo;
589 const VarDecl &Var;
590
591 CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
592 const VarDecl *Var)
593 : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
594
595 void Emit(CodeGenFunction &CGF, Flags flags) override {
596 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
597 Var.getType(), VK_LValue, SourceLocation());
598 // Compute the address of the local variable, in case it's a byref
599 // or something.
600 llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);
601
602 // In some cases, the type of the function argument will be different from
603 // the type of the pointer. An example of this is
604 // void f(void* arg);
605 // __attribute__((cleanup(f))) void *g;
606 //
607 // To fix this we insert a bitcast here.
608 QualType ArgTy = FnInfo.arg_begin()->type;
609 llvm::Value *Arg =
610 CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
611
612 CallArgList Args;
613 Args.add(RValue::get(Arg),
614 CGF.getContext().getPointerType(Var.getType()));
615 auto Callee = CGCallee::forDirect(CleanupFn);
616 CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
617 }
618 };
619} // end anonymous namespace
620
621/// EmitAutoVarWithLifetime - Does the setup required for an automatic
622/// variable with lifetime.
623static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
624 Address addr,
625 Qualifiers::ObjCLifetime lifetime) {
626 switch (lifetime) {
627 case Qualifiers::OCL_None:
628 llvm_unreachable("present but none");
629
630 case Qualifiers::OCL_ExplicitNone:
631 // nothing to do
632 break;
633
634 case Qualifiers::OCL_Strong: {
635 CodeGenFunction::Destroyer *destroyer =
636 (var.hasAttr<ObjCPreciseLifetimeAttr>()
637 ? CodeGenFunction::destroyARCStrongPrecise
638 : CodeGenFunction::destroyARCStrongImprecise);
639
640 CleanupKind cleanupKind = CGF.getARCCleanupKind();
641 CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
642 cleanupKind & EHCleanup);
643 break;
644 }
645 case Qualifiers::OCL_Autoreleasing:
646 // nothing to do
647 break;
648
649 case Qualifiers::OCL_Weak:
650 // __weak objects always get EH cleanups; otherwise, exceptions
651 // could cause really nasty crashes instead of mere leaks.
652 CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
653 CodeGenFunction::destroyARCWeak,
654 /*useEHCleanup*/ true);
655 break;
656 }
657}
658
659static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
660 if (const Expr *e = dyn_cast<Expr>(s)) {
661 // Skip the most common kinds of expressions that make
662 // hierarchy-walking expensive.
663 s = e = e->IgnoreParenCasts();
664
665 if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
666 return (ref->getDecl() == &var);
667 if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
668 const BlockDecl *block = be->getBlockDecl();
669 for (const auto &I : block->captures()) {
670 if (I.getVariable() == &var)
671 return true;
672 }
673 }
674 }
675
676 for (const Stmt *SubStmt : s->children())
677 // SubStmt might be null; as in missing decl or conditional of an if-stmt.
678 if (SubStmt && isAccessedBy(var, SubStmt))
679 return true;
680
681 return false;
682}
683
684static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
685 if (!decl) return false;
686 if (!isa<VarDecl>(decl)) return false;
687 const VarDecl *var = cast<VarDecl>(decl);
688 return isAccessedBy(*var, e);
689}
690
691static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
692 const LValue &destLV, const Expr *init) {
693 bool needsCast = false;
694
695 while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
696 switch (castExpr->getCastKind()) {
697 // Look through casts that don't require representation changes.
698 case CK_NoOp:
699 case CK_BitCast:
700 case CK_BlockPointerToObjCPointerCast:
701 needsCast = true;
702 break;
703
704 // If we find an l-value to r-value cast from a __weak variable,
705 // emit this operation as a copy or move.
706 case CK_LValueToRValue: {
707 const Expr *srcExpr = castExpr->getSubExpr();
708 if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
709 return false;
710
711 // Emit the source l-value.
712 LValue srcLV = CGF.EmitLValue(srcExpr);
713
714 // Handle a formal type change to avoid asserting.
715 auto srcAddr = srcLV.getAddress();
716 if (needsCast) {
717 srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
718 }
719
720 // If it was an l-value, use objc_copyWeak.
721 if (srcExpr->isLValue()) {
722 CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
723 } else {
724 assert(srcExpr->isXValue());
725 CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
726 }
727 return true;
728 }
729
730 // Stop at anything else.
731 default:
732 return false;
733 }
734
735 init = castExpr->getSubExpr();
736 }
737 return false;
738}
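// Sketch of the source pattern this matches under ARC (assumed example):
//
//   __weak id src = ...;
//   __weak id dst = src;   // emitted as objc_copyWeak(&dst, &src) rather than
//                          // a weak load of src followed by a weak store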
739
740static void drillIntoBlockVariable(CodeGenFunction &CGF,
741 LValue &lvalue,
742 const VarDecl *var) {
743 lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
744}
745
746void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
747 SourceLocation Loc) {
748 if (!SanOpts.has(SanitizerKind::NullabilityAssign))
749 return;
750
751 auto Nullability = LHS.getType()->getNullability();
752 if (!Nullability || *Nullability != NullabilityKind::NonNull)
753 return;
754
755 // Check if the right hand side of the assignment is nonnull, if the left
756 // hand side must be nonnull.
757 SanitizerScope SanScope(this);
758 llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
759 llvm::Constant *StaticData[] = {
760 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
761 llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
762 llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
763 EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
764 SanitizerHandler::TypeMismatch, StaticData, RHS);
765}
766
767void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
768 LValue lvalue, bool capturedByInit) {
769 Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
770 if (!lifetime) {
771 llvm::Value *value = EmitScalarExpr(init);
772 if (capturedByInit)
773 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
774 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
775 EmitStoreThroughLValue(RValue::get(value), lvalue, true);
776 return;
777 }
778
779 if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
780 init = DIE->getExpr();
781
782 // If we're emitting a value with lifetime, we have to do the
783 // initialization *before* we leave the cleanup scopes.
784 if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
785 CodeGenFunction::RunCleanupsScope Scope(*this);
786 return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
787 }
788
789 // We have to maintain the illusion that the variable is
790 // zero-initialized. If the variable might be accessed in its
791 // initializer, zero-initialize before running the initializer, then
792 // actually perform the initialization with an assign.
793 bool accessedByInit = false;
794 if (lifetime != Qualifiers::OCL_ExplicitNone)
795 accessedByInit = (capturedByInit || isAccessedBy(D, init));
796 if (accessedByInit) {
797 LValue tempLV = lvalue;
798 // Drill down to the __block object if necessary.
799 if (capturedByInit) {
800 // We can use a simple GEP for this because it can't have been
801 // moved yet.
802 tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
803 cast<VarDecl>(D),
804 /*follow*/ false));
805 }
806
807 auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
808 llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
809
810 // If __weak, we want to use a barrier under certain conditions.
811 if (lifetime == Qualifiers::OCL_Weak)
812 EmitARCInitWeak(tempLV.getAddress(), zero);
813
814 // Otherwise just do a simple store.
815 else
816 EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
817 }
818
819 // Emit the initializer.
820 llvm::Value *value = nullptr;
821
822 switch (lifetime) {
823 case Qualifiers::OCL_None:
824 llvm_unreachable("present but none");
825
826 case Qualifiers::OCL_Strong: {
827 if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
828 value = EmitARCRetainScalarExpr(init);
829 break;
830 }
831 // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
832 // that we omit the retain, and causes non-autoreleased return values to be
833 // immediately released.
834 [[fallthrough]];
835 }
836
837 case Qualifiers::OCL_ExplicitNone:
838 value = EmitARCUnsafeUnretainedScalarExpr(init);
839 break;
840
841 case Qualifiers::OCL_Weak: {
842 // If it's not accessed by the initializer, try to emit the
843 // initialization with a copy or move.
844 if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
845 return;
846 }
847
848 // No way to optimize a producing initializer into this. It's not
849 // worth optimizing for, because the value will immediately
850 // disappear in the common case.
851 value = EmitScalarExpr(init);
852
853 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
854 if (accessedByInit)
855 EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
856 else
857 EmitARCInitWeak(lvalue.getAddress(), value);
858 return;
859 }
860
861 case Qualifiers::OCL_Autoreleasing:
862 value = EmitARCRetainAutoreleaseScalarExpr(init);
863 break;
864 }
865
866 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
867
868 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
869
870 // If the variable might have been accessed by its initializer, we
871 // might have to initialize with a barrier. We have to do this for
872 // both __weak and __strong, but __weak got filtered out above.
873 if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
874 llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
875 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
876 EmitARCRelease(oldValue, ARCImpreciseLifetime);
877 return;
878 }
879
880 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
881}
882
883/// Decide whether we can emit the non-zero parts of the specified initializer
884/// with equal or fewer than NumStores scalar stores.
885static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
886 unsigned &NumStores) {
887 // Zero and Undef never requires any extra stores.
888 if (isa<llvm::ConstantAggregateZero>(Init) ||
889 isa<llvm::ConstantPointerNull>(Init) ||
890 isa<llvm::UndefValue>(Init))
891 return true;
892 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
893 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
894 isa<llvm::ConstantExpr>(Init))
895 return Init->isNullValue() || NumStores--;
896
897 // See if we can emit each element.
898 if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
899 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
900 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
901 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
902 return false;
903 }
904 return true;
905 }
906
907 if (llvm::ConstantDataSequential *CDS =
908 dyn_cast<llvm::ConstantDataSequential>(Init)) {
909 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
910 llvm::Constant *Elt = CDS->getElementAsConstant(i);
911 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
912 return false;
913 }
914 return true;
915 }
916
917 // Anything else is hard and scary.
918 return false;
919}
920
921/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
922/// the scalar stores that would be required.
923static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
924 llvm::Constant *Init, Address Loc,
925 bool isVolatile, CGBuilderTy &Builder,
926 bool IsAutoInit) {
927 assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
928 "called emitStoresForInitAfterBZero for zero or undef value.");
929
930 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
931 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
932 isa<llvm::ConstantExpr>(Init)) {
933 auto *I = Builder.CreateStore(Init, Loc, isVolatile);
934 if (IsAutoInit)
935 I->addAnnotationMetadata("auto-init");
936 return;
937 }
938
939 if (llvm::ConstantDataSequential *CDS =
940 dyn_cast<llvm::ConstantDataSequential>(Init)) {
941 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
942 llvm::Constant *Elt = CDS->getElementAsConstant(i);
943
944 // If necessary, get a pointer to the element and emit it.
945 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
946 emitStoresForInitAfterBZero(
947 CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
948 Builder, IsAutoInit);
949 }
950 return;
951 }
952
953 assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
954 "Unknown value type!");
955
956 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
957 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
958
959 // If necessary, get a pointer to the element and emit it.
960 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
961 emitStoresForInitAfterBZero(CGM, Elt,
962 Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
963 isVolatile, Builder, IsAutoInit);
964 }
965}
966
967/// Decide whether we should use bzero plus some stores to initialize a local
968/// variable instead of using a memcpy from a constant global. It is beneficial
969/// to use bzero if the global is all zeros, or mostly zeros and large.
970static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
971 uint64_t GlobalSize) {
972 // If a global is all zeros, always use a bzero.
973 if (isa<llvm::ConstantAggregateZero>(Init)) return true;
974
975 // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
976 // do it if it will require 6 or fewer scalar stores.
977 // TODO: Should the budget depend on the size? Avoiding a large global warrants
978 // plopping in more stores.
979 unsigned StoreBudget = 6;
980 uint64_t SizeLimit = 32;
981
982 return GlobalSize > SizeLimit &&
983 canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
984}
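// A minimal sketch of the resulting strategy (invented example):
//
//   int big[4096] = {1, 2};      // large and mostly zero: memset(0) followed
//                                // by two scalar stores
//   int small[4] = {1, 2, 3, 4}; // not mostly zero and <= 32 bytes: handled by
//                                // the later split-store / memcpy paths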
985
986/// Decide whether we should use memset to initialize a local variable instead
987/// of using a memcpy from a constant global. Assumes we've already decided to
988/// not use bzero.
989/// FIXME We could be more clever, as we are for bzero above, and generate
990/// memset followed by stores. It's unclear that's worth the effort.
991static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
992 uint64_t GlobalSize,
993 const llvm::DataLayout &DL) {
994 uint64_t SizeLimit = 32;
995 if (GlobalSize <= SizeLimit)
996 return nullptr;
997 return llvm::isBytewiseValue(Init, DL);
998}
999
1000/// Decide whether we want to split a constant structure or array store into a
1001/// sequence of its fields' stores. This may cost us code size and compilation
1002/// speed, but plays better with store optimizations.
1003static bool shouldSplitConstantStore(CodeGenModule &CGM,
1004 uint64_t GlobalByteSize) {
1005 // Don't break things that occupy more than one cacheline.
1006 uint64_t ByteSizeLimit = 64;
1007 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1008 return false;
1009 if (GlobalByteSize <= ByteSizeLimit)
1010 return true;
1011 return false;
1012}
1013
1014enum class IsPattern { No, Yes };
1015
1016/// Generate a constant filled with either a pattern or zeroes.
1017static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
1018 llvm::Type *Ty) {
1019 if (isPattern == IsPattern::Yes)
1020 return initializationPatternFor(CGM, Ty);
1021 else
1022 return llvm::Constant::getNullValue(Ty);
1023}
1024
1025static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1026 llvm::Constant *constant);
1027
1028/// Helper function for constWithPadding() to deal with padding in structures.
1029static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
1030 IsPattern isPattern,
1031 llvm::StructType *STy,
1032 llvm::Constant *constant) {
1033 const llvm::DataLayout &DL = CGM.getDataLayout();
1034 const llvm::StructLayout *Layout = DL.getStructLayout(STy);
1035 llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
1036 unsigned SizeSoFar = 0;
1037 SmallVector<llvm::Constant *, 8> Values;
1038 bool NestedIntact = true;
1039 for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1040 unsigned CurOff = Layout->getElementOffset(i);
1041 if (SizeSoFar < CurOff) {
1042 assert(!STy->isPacked());
1043 auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
1044 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1045 }
1046 llvm::Constant *CurOp;
1047 if (constant->isZeroValue())
1048 CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
1049 else
1050 CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
1051 auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
1052 if (CurOp != NewOp)
1053 NestedIntact = false;
1054 Values.push_back(NewOp);
1055 SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
1056 }
1057 unsigned TotalSize = Layout->getSizeInBytes();
1058 if (SizeSoFar < TotalSize) {
1059 auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
1060 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1061 }
1062 if (NestedIntact && Values.size() == STy->getNumElements())
1063 return constant;
1064 return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
1065}
1066
1067/// Replace all padding bytes in a given constant with either a pattern byte or
1068/// 0x00.
1069static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1070 llvm::Constant *constant) {
1071 llvm::Type *OrigTy = constant->getType();
1072 if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
1073 return constStructWithPadding(CGM, isPattern, STy, constant);
1074 if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
1075 llvm::SmallVector<llvm::Constant *, 8> Values;
1076 uint64_t Size = ArrayTy->getNumElements();
1077 if (!Size)
1078 return constant;
1079 llvm::Type *ElemTy = ArrayTy->getElementType();
1080 bool ZeroInitializer = constant->isNullValue();
1081 llvm::Constant *OpValue, *PaddedOp;
1082 if (ZeroInitializer) {
1083 OpValue = llvm::Constant::getNullValue(ElemTy);
1084 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1085 }
1086 for (unsigned Op = 0; Op != Size; ++Op) {
1087 if (!ZeroInitializer) {
1088 OpValue = constant->getAggregateElement(Op);
1089 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1090 }
1091 Values.push_back(PaddedOp);
1092 }
1093 auto *NewElemTy = Values[0]->getType();
1094 if (NewElemTy == ElemTy)
1095 return constant;
1096 auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
1097 return llvm::ConstantArray::get(NewArrayTy, Values);
1098 }
1099 // FIXME: Add handling for tail padding in vectors. Vectors don't
1100 // have padding between or inside elements, but the total amount of
1101 // data can be less than the allocated size.
1102 return constant;
1103}
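// Example of the padding this rewrites (assumed layout, typical 64-bit ABI):
//
//   struct S { char c; int i; };   // 3 padding bytes between 'c' and 'i'
//
// For -ftrivial-auto-var-init, those padding bytes are also filled with the
// zero or pattern byte so the whole object gets a defined bit pattern.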
1104
1105Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
1106 llvm::Constant *Constant,
1107 CharUnits Align) {
1108 auto FunctionName = [&](const DeclContext *DC) -> std::string {
1109 if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
1110 if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
1111 return CC->getNameAsString();
1112 if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
1113 return CD->getNameAsString();
1114 return std::string(getMangledName(FD));
1115 } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1116 return OM->getNameAsString();
1117 } else if (isa<BlockDecl>(DC)) {
1118 return "<block>";
1119 } else if (isa<CapturedDecl>(DC)) {
1120 return "<captured>";
1121 } else {
1122 llvm_unreachable("expected a function or method");
1123 }
1124 };
1125
1126 // Form a simple per-variable cache of these values in case we find we
1127 // want to reuse them.
1128 llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
1129 if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1130 auto *Ty = Constant->getType();
1131 bool isConstant = true;
1132 llvm::GlobalVariable *InsertBefore = nullptr;
1133 unsigned AS =
1134 getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
1135 std::string Name;
1136 if (D.hasGlobalStorage())
1137 Name = getMangledName(&D).str() + ".const";
1138 else if (const DeclContext *DC = D.getParentFunctionOrMethod())
1139 Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
1140 else
1141 llvm_unreachable("local variable has no parent function or method");
1142 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
1143 getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
1144 Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
1145 GV->setAlignment(Align.getAsAlign());
1146 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1147 CacheEntry = GV;
1148 } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
1149 CacheEntry->setAlignment(Align.getAsAlign());
1150 }
1151
1152 return Address(CacheEntry, CacheEntry->getValueType(), Align);
1153}
1154
1155static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
1156 const VarDecl &D,
1157 CGBuilderTy &Builder,
1158 llvm::Constant *Constant,
1159 CharUnits Align) {
1160 Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
1161 return SrcPtr.withElementType(CGM.Int8Ty);
1162}
1163
1164static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
1165 Address Loc, bool isVolatile,
1166 CGBuilderTy &Builder,
1167 llvm::Constant *constant, bool IsAutoInit) {
1168 auto *Ty = constant->getType();
1169 uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
1170 if (!ConstantSize)
1171 return;
1172
1173 bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
1174 Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1175 if (canDoSingleStore) {
1176 auto *I = Builder.CreateStore(constant, Loc, isVolatile);
1177 if (IsAutoInit)
1178 I->addAnnotationMetadata("auto-init");
1179 return;
1180 }
1181
1182 auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
1183
1184 // If the initializer is all or mostly the same, codegen with bzero / memset
1185 // then do a few stores afterward.
1186 if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
1187 auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
1188 SizeVal, isVolatile);
1189 if (IsAutoInit)
1190 I->addAnnotationMetadata("auto-init");
1191
1192 bool valueAlreadyCorrect =
1193 constant->isNullValue() || isa<llvm::UndefValue>(constant);
1194 if (!valueAlreadyCorrect) {
1195 Loc = Loc.withElementType(Ty);
1196 emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
1197 IsAutoInit);
1198 }
1199 return;
1200 }
1201
1202 // If the initializer is a repeated byte pattern, use memset.
1203 llvm::Value *Pattern =
1204 shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
1205 if (Pattern) {
1206 uint64_t Value = 0x00;
1207 if (!isa<llvm::UndefValue>(Pattern)) {
1208 const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
1209 assert(AP.getBitWidth() <= 8);
1210 Value = AP.getLimitedValue();
1211 }
1212 auto *I = Builder.CreateMemSet(
1213 Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
1214 if (IsAutoInit)
1215 I->addAnnotationMetadata("auto-init");
1216 return;
1217 }
1218
1219 // If the initializer is small or trivialAutoVarInit is set, use a handful of
1220 // stores.
1221 bool IsTrivialAutoVarInitPattern =
1222 CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
1223 LangOptions::TrivialAutoVarInitKind::Pattern;
1224 if (shouldSplitConstantStore(CGM, ConstantSize)) {
1225 if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
1226 if (STy == Loc.getElementType() ||
1227 (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1228 const llvm::StructLayout *Layout =
1229 CGM.getDataLayout().getStructLayout(STy);
1230 for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1231 CharUnits CurOff =
1232 CharUnits::fromQuantity(Layout->getElementOffset(i));
1233 Address EltPtr = Builder.CreateConstInBoundsByteGEP(
1234 Loc.withElementType(CGM.Int8Ty), CurOff);
1235 emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
1236 constant->getAggregateElement(i), IsAutoInit);
1237 }
1238 return;
1239 }
1240 } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1241 if (ATy == Loc.getElementType() ||
1242 (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1243 for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1244 Address EltPtr = Builder.CreateConstGEP(
1245 Loc.withElementType(ATy->getElementType()), i);
1246 emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
1247 constant->getAggregateElement(i), IsAutoInit);
1248 }
1249 return;
1250 }
1251 }
1252 }
1253
1254 // Copy from a global.
1255 auto *I =
1256 Builder.CreateMemCpy(Loc,
1257 createUnnamedGlobalForMemcpyFrom(
1258 CGM, D, Builder, constant, Loc.getAlignment()),
1259 SizeVal, isVolatile);
1260 if (IsAutoInit)
1261 I->addAnnotationMetadata("auto-init");
1262}
1263
1264static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
1265 Address Loc, bool isVolatile,
1266 CGBuilderTy &Builder) {
1267 llvm::Type *ElTy = Loc.getElementType();
1268 llvm::Constant *constant =
1269 constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
1270 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1271 /*IsAutoInit=*/true);
1272}
1273
1274static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
1275 Address Loc, bool isVolatile,
1276 CGBuilderTy &Builder) {
1277 llvm::Type *ElTy = Loc.getElementType();
1278 llvm::Constant *constant = constWithPadding(
1279 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1280 assert(!isa<llvm::UndefValue>(constant));
1281 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1282 /*IsAutoInit=*/true);
1283}
1284
1285static bool containsUndef(llvm::Constant *constant) {
1286 auto *Ty = constant->getType();
1287 if (isa<llvm::UndefValue>(constant))
1288 return true;
1289 if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1290 for (llvm::Use &Op : constant->operands())
1291 if (containsUndef(cast<llvm::Constant>(Op)))
1292 return true;
1293 return false;
1294}
1295
1296static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
1297 llvm::Constant *constant) {
1298 auto *Ty = constant->getType();
1299 if (isa<llvm::UndefValue>(constant))
1300 return patternOrZeroFor(CGM, isPattern, Ty);
1301 if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1302 return constant;
1303 if (!containsUndef(constant))
1304 return constant;
1305 llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
1306 for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1307 auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
1308 Values[Op] = replaceUndef(CGM, isPattern, OpValue);
1309 }
1310 if (Ty->isStructTy())
1311 return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
1312 if (Ty->isArrayTy())
1313 return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
1314 assert(Ty->isVectorTy());
1315 return llvm::ConstantVector::get(Values);
1316}
1317
1318/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
1319/// variable declaration with auto, register, or no storage class specifier.
1320/// These turn into simple stack objects, or GlobalValues depending on target.
1321void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
1322 AutoVarEmission emission = EmitAutoVarAlloca(D);
1323 EmitAutoVarInit(emission);
1324 EmitAutoVarCleanups(emission);
1325}
1326
1327/// Emit a lifetime.begin marker if some criteria are satisfied.
1328/// \return a pointer to the temporary size Value if a marker was emitted, null
1329/// otherwise
1330llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
1331 llvm::Value *Addr) {
1332 if (!ShouldEmitLifetimeMarkers)
1333 return nullptr;
1334
1335 assert(Addr->getType()->getPointerAddressSpace() ==
1336 CGM.getDataLayout().getAllocaAddrSpace() &&
1337 "Pointer should be in alloca address space");
1338 llvm::Value *SizeV = llvm::ConstantInt::get(
1339 Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
1340 llvm::CallInst *C =
1341 Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
1342 C->setDoesNotThrow();
1343 return SizeV;
1344}
1345
1346void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
1347 assert(Addr->getType()->getPointerAddressSpace() ==
1348 CGM.getDataLayout().getAllocaAddrSpace() &&
1349 "Pointer should be in alloca address space");
1350 llvm::CallInst *C =
1351 Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
1352 C->setDoesNotThrow();
1353}
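// Roughly, the markers emitted around a fixed-size local look like this
// (sketch of the IR, not literal output):
//
//   %x = alloca i32, align 4
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
//   ...                                  ; uses of %x
//   call void @llvm.lifetime.end.p0(i64 4, ptr %x)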
1354
1355void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1356 CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1357 // For each dimension stores its QualType and corresponding
1358 // size-expression Value.
1359 SmallVector<VlaSizePair, 4> Dimensions;
1360 SmallVector<const IdentifierInfo *, 4> VLAExprNames;
1361
1362 // Break down the array into individual dimensions.
1363 QualType Type1D = D.getType();
1364 while (getContext().getAsVariableArrayType(Type1D)) {
1365 auto VlaSize = getVLAElements1D(Type1D);
1366 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1367 Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1368 else {
1369 // Generate a locally unique name for the size expression.
1370 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1371 SmallString<12> Buffer;
1372 StringRef NameRef = Name.toStringRef(Buffer);
1373 auto &Ident = getContext().Idents.getOwn(NameRef);
1374 VLAExprNames.push_back(&Ident);
1375 auto SizeExprAddr =
1376 CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1377 Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1378 Dimensions.emplace_back(SizeExprAddr.getPointer(),
1379 Type1D.getUnqualifiedType());
1380 }
1381 Type1D = VlaSize.Type;
1382 }
1383
1384 if (!EmitDebugInfo)
1385 return;
1386
1387 // Register each dimension's size-expression with a DILocalVariable,
1388 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1389 // to describe this array.
1390 unsigned NameIdx = 0;
1391 for (auto &VlaSize : Dimensions) {
1392 llvm::Metadata *MD;
1393 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1394 MD = llvm::ConstantAsMetadata::get(C);
1395 else {
1396 // Create an artificial VarDecl to generate debug info for.
1397 const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1398 auto QT = getContext().getIntTypeForBitwidth(
1399 SizeTy->getScalarSizeInBits(), false);
1400 auto *ArtificialDecl = VarDecl::Create(
1401 getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1402 D.getLocation(), D.getLocation(), NameIdent, QT,
1403 getContext().CreateTypeSourceInfo(QT), SC_Auto);
1404 ArtificialDecl->setImplicit();
1405
1406 MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1407 Builder);
1408 }
1409 assert(MD && "No Size expression debug node created");
1410 DI->registerVLASizeExpression(VlaSize.Type, MD);
1411 }
1412}
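// Invented example of what gets registered here:
//
//   void f(int n, int m) {
//     int vla[n][m + 1];   // the two non-constant bounds are spilled to
//   }                      // "__vla_expr0" / "__vla_expr1" allocas and
//                          // referenced from the array's debug-info bounds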
1413
1414/// EmitAutoVarAlloca - Emit the alloca and debug information for a
1415/// local variable. Does not emit initialization or destruction.
1416CodeGenFunction::AutoVarEmission
1417CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1418 QualType Ty = D.getType();
1419 assert(
1420 Ty.getAddressSpace() == LangAS::Default ||
1421 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1422
1423 AutoVarEmission emission(D);
1424
1425 bool isEscapingByRef = D.isEscapingByref();
1426 emission.IsEscapingByRef = isEscapingByRef;
1427
1428 CharUnits alignment = getContext().getDeclAlign(&D);
1429
1430 // If the type is variably-modified, emit all the VLA sizes for it.
1431 if (Ty->isVariablyModifiedType())
1432 EmitVariablyModifiedType(Ty);
1433
1434 auto *DI = getDebugInfo();
1435 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1436
1437 Address address = Address::invalid();
1438 RawAddress AllocaAddr = RawAddress::invalid();
1439 Address OpenMPLocalAddr = Address::invalid();
1440 if (CGM.getLangOpts().OpenMPIRBuilder)
1441 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
1442 else
1443 OpenMPLocalAddr =
1444 getLangOpts().OpenMP
1445 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1446 : Address::invalid();
1447
1448 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1449
1450 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1451 address = OpenMPLocalAddr;
1452 AllocaAddr = OpenMPLocalAddr;
1453 } else if (Ty->isConstantSizeType()) {
1454 // If this value is an array or struct with a statically determinable
1455 // constant initializer, there are optimizations we can do.
1456 //
1457 // TODO: We should constant-evaluate the initializer of any variable,
1458 // as long as it is initialized by a constant expression. Currently,
1459 // isConstantInitializer produces wrong answers for structs with
1460 // reference or bitfield members, and a few other cases, and checking
1461 // for POD-ness protects us from some of these.
1462 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1463 (D.isConstexpr() ||
1464 ((Ty.isPODType(getContext()) ||
1465 getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1466 D.getInit()->isConstantInitializer(getContext(), false)))) {
1467
1468 // If the variable's a const type, and it's neither an NRVO
1469 // candidate nor a __block variable and has no mutable members,
1470 // emit it as a global instead.
1471 // Exception is if a variable is located in non-constant address space
1472 // in OpenCL.
1473 bool NeedsDtor =
1474 D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
1475 if ((!getLangOpts().OpenCL ||
1476 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1477 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1478 !isEscapingByRef &&
1479 Ty.isConstantStorage(getContext(), true, !NeedsDtor))) {
1480 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1481
1482 // Signal this condition to later callbacks.
1483 emission.Addr = Address::invalid();
1484 assert(emission.wasEmittedAsGlobal());
1485 return emission;
1486 }
1487
1488 // Otherwise, tell the initialization code that we're in this case.
1489 emission.IsConstantAggregate = true;
1490 }
1491
1492 // A normal fixed sized variable becomes an alloca in the entry block,
1493 // unless:
1494 // - it's an NRVO variable.
1495 // - we are compiling OpenMP and it's an OpenMP local variable.
1496 if (NRVO) {
1497 // The named return value optimization: allocate this variable in the
1498 // return slot, so that we can elide the copy when returning this
1499 // variable (C++0x [class.copy]p34).
1500 address = ReturnValue;
1501 AllocaAddr =
1504 ;
1505
1506 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1507 const auto *RD = RecordTy->getDecl();
1508 const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1509 if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1510 RD->isNonTrivialToPrimitiveDestroy()) {
1511 // Create a flag that is used to indicate when the NRVO was applied
1512 // to this variable. Set it to zero to indicate that NRVO was not
1513 // applied.
1514 llvm::Value *Zero = Builder.getFalse();
1515 RawAddress NRVOFlag =
1516 CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1518 Builder.CreateStore(Zero, NRVOFlag);
1519
1520 // Record the NRVO flag for this variable.
1521 NRVOFlags[&D] = NRVOFlag.getPointer();
1522 emission.NRVOFlag = NRVOFlag.getPointer();
1523 }
1524 }
1525 } else {
1526 CharUnits allocaAlignment;
1527 llvm::Type *allocaTy;
1528 if (isEscapingByRef) {
1529 auto &byrefInfo = getBlockByrefInfo(&D);
1530 allocaTy = byrefInfo.Type;
1531 allocaAlignment = byrefInfo.ByrefAlignment;
1532 } else {
1533 allocaTy = ConvertTypeForMem(Ty);
1534 allocaAlignment = alignment;
1535 }
1536
1537 // Create the alloca. Note that we set the name separately from
1538 // building the instruction so that it's there even in no-asserts
1539 // builds.
1540 address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1541 /*ArraySize=*/nullptr, &AllocaAddr);
1542
1543 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1544 // the catch parameter starts in the catchpad instruction, and we can't
1545 // insert code in those basic blocks.
1546 bool IsMSCatchParam =
1547 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1548
1549 // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1550 // if we don't have a valid insertion point (?).
1551 if (HaveInsertPoint() && !IsMSCatchParam) {
1552 // If there's a jump into the lifetime of this variable, its lifetime
1553 // gets broken up into several regions in IR, which requires more work
1554 // to handle correctly. For now, just omit the intrinsics; this is a
1555 // rare case, and it's better to just be conservatively correct.
1556 // PR28267.
1557 //
1558 // We have to do this in all language modes if there's a jump past the
1559 // declaration. We also have to do it in C if there's a jump to an
1560 // earlier point in the current block because non-VLA lifetimes begin as
1561 // soon as the containing block is entered, not when its variables
1562 // actually come into scope; suppressing the lifetime annotations
1563 // completely in this case is unnecessarily pessimistic, but again, this
1564 // is rare.
1565 if (!Bypasses.IsBypassed(&D) &&
1566 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1567 llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
1568 emission.SizeForLifetimeMarkers =
1569 EmitLifetimeStart(Size, AllocaAddr.getPointer());
1570 }
1571 } else {
1572 assert(!emission.useLifetimeMarkers());
1573 }
1574 }
1575 } else {
1576 EnsureInsertPoint();
1577
1578 // Delayed globalization for variable length declarations. This ensures that
1579 // the expression representing the length has been emitted and can be used
1580 // by the definition of the VLA. Since this is an escaped declaration, in
1581 // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
1582 // deallocation call to __kmpc_free_shared() is emitted later.
1583 bool VarAllocated = false;
1584 if (getLangOpts().OpenMPIsTargetDevice) {
1585 auto &RT = CGM.getOpenMPRuntime();
1586 if (RT.isDelayedVariableLengthDecl(*this, &D)) {
1587 // Emit call to __kmpc_alloc_shared() instead of the alloca.
1588 std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
1589 RT.getKmpcAllocShared(*this, &D);
1590
1591 // Save the address of the allocation:
1592 LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
1593 CGM.getContext().getDeclAlign(&D),
1594 AlignmentSource::Decl);
1595 address = Base.getAddress();
1596
1597 // Push a cleanup block to emit the call to __kmpc_free_shared in the
1598 // appropriate location at the end of the scope of the
1599 // __kmpc_alloc_shared functions:
1600 pushKmpcAllocFree(NormalCleanup, AddrSizePair);
1601
1602 // Mark variable as allocated:
1603 VarAllocated = true;
1604 }
1605 }
1606
1607 if (!VarAllocated) {
1608 if (!DidCallStackSave) {
1609 // Save the stack.
1610 Address Stack =
1611 CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");
1612
1613 llvm::Value *V = Builder.CreateStackSave();
1614 assert(V->getType() == AllocaInt8PtrTy);
1615 Builder.CreateStore(V, Stack);
1616
1617 DidCallStackSave = true;
1618
1619 // Push a cleanup block and restore the stack there.
1620 // FIXME: in general circumstances, this should be an EH cleanup.
1621 pushStackRestore(NormalCleanup, Stack);
1622 }
1623
1624 auto VlaSize = getVLASize(Ty);
1625 llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1626
1627 // Allocate memory for the array.
1628 address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1629 &AllocaAddr);
1630 }
1631
1632 // If we have debug info enabled, properly describe the VLA dimensions for
1633 // this type by registering the vla size expression for each of the
1634 // dimensions.
1635 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1636 }
1637
1638 setAddrOfLocalVar(&D, address);
1639 emission.Addr = address;
1640 emission.AllocaAddr = AllocaAddr;
1641
1642 // Emit debug info for local var declaration.
1643 if (EmitDebugInfo && HaveInsertPoint()) {
1644 Address DebugAddr = address;
1645 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1646 DI->setLocation(D.getLocation());
1647
1648 // If NRVO, use a pointer to the return address.
1649 if (UsePointerValue) {
1650 DebugAddr = ReturnValuePointer;
1651 AllocaAddr = ReturnValuePointer;
1652 }
1653 (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
1654 UsePointerValue);
1655 }
1656
1657 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1658 EmitVarAnnotations(&D, address.emitRawPointer(*this));
1659
1660 // Make sure we call @llvm.lifetime.end.
1661 if (emission.useLifetimeMarkers())
1662 EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1663 emission.getOriginalAllocatedAddress(),
1664 emission.getSizeForLifetimeMarkers());
1665
1666 return emission;
1667}
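For illustration, a hypothetical C++ function in which the NRVO flag emitted above comes into play: the named return value is constructed directly in the return slot, and the "nrvo" flag records whether the NRVO return was actually reached so the destructor cleanup can be skipped in that case.

    struct Big { Big(); ~Big(); int data[64]; };
    Big make(bool fail) {
      Big b;       // constructed in the return slot; "nrvo" flag starts false
      if (fail)
        throw 0;   // flag still false here, so the cleanup runs ~Big() on 'b'
      return b;    // flag set to true, so the cleanup skips the destructor
    }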
1668
1669static bool isCapturedBy(const VarDecl &, const Expr *);
1670
1671/// Determines whether the given __block variable is potentially
1672/// captured by the given statement.
1673static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1674 if (const Expr *E = dyn_cast<Expr>(S))
1675 return isCapturedBy(Var, E);
1676 for (const Stmt *SubStmt : S->children())
1677 if (isCapturedBy(Var, SubStmt))
1678 return true;
1679 return false;
1680}
1681
1682/// Determines whether the given __block variable is potentially
1683/// captured by the given expression.
1684static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1685 // Skip the most common kinds of expressions that make
1686 // hierarchy-walking expensive.
1687 E = E->IgnoreParenCasts();
1688
1689 if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1690 const BlockDecl *Block = BE->getBlockDecl();
1691 for (const auto &I : Block->captures()) {
1692 if (I.getVariable() == &Var)
1693 return true;
1694 }
1695
1696 // No need to walk into the subexpressions.
1697 return false;
1698 }
1699
1700 if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1701 const CompoundStmt *CS = SE->getSubStmt();
1702 for (const auto *BI : CS->body())
1703 if (const auto *BIE = dyn_cast<Expr>(BI)) {
1704 if (isCapturedBy(Var, BIE))
1705 return true;
1706 }
1707 else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1708 // special case declarations
1709 for (const auto *I : DS->decls()) {
1710 if (const auto *VD = dyn_cast<VarDecl>((I))) {
1711 const Expr *Init = VD->getInit();
1712 if (Init && isCapturedBy(Var, Init))
1713 return true;
1714 }
1715 }
1716 }
1717 else
1718 // FIXME: For now, make the safe assumption that arbitrary statements cause capturing.
1719 // Later, provide code to poke into statements for capture analysis.
1720 return true;
1721 return false;
1722 }
1723
1724 for (const Stmt *SubStmt : E->children())
1725 if (isCapturedBy(Var, SubStmt))
1726 return true;
1727
1728 return false;
1729}
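A hypothetical snippet (assuming -fblocks) in which a __block variable is captured by its own initializer, the situation these helpers detect; EmitAutoVarInit then emits the initializer first and only afterwards copies the result into the byref slot:

    void consume(void (^blk)(void));
    void f(void) {
      __block int x = (consume(^{ (void)x; }), 42);
    }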
1730
1731/// Determine whether the given initializer is trivial in the sense
1732/// that it requires no code to be generated.
1733 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1734 if (!Init)
1735 return true;
1736
1737 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1738 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1739 if (Constructor->isTrivial() &&
1740 Constructor->isDefaultConstructor() &&
1741 !Construct->requiresZeroInitialization())
1742 return true;
1743
1744 return false;
1745}
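As a sketch of what counts as trivial here: a default-constructed object with a trivial default constructor needs no stores at all, while value-initialization still requires zero-initialization code.

    struct POD { int a; int b; };
    void g() {
      POD p;    // trivial default construction: treated as a trivial initializer
      POD q{};  // requires zero-initialization: real initialization code is emitted
    }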
1746
1747void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1748 const VarDecl &D,
1749 Address Loc) {
1750 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1751 auto trivialAutoVarInitMaxSize =
1752 getContext().getLangOpts().TrivialAutoVarInitMaxSize;
1753 CharUnits Size = getContext().getTypeSizeInChars(type);
1754 bool isVolatile = type.isVolatileQualified();
1755 if (!Size.isZero()) {
1756 // We skip auto-init variables by their alloc size. Take this as an example:
1757 // "struct Foo {int x; char buff[1024];}" Assume the max-size flag is 1023.
1758 // All Foo type variables will be skipped. Ideally, we only skip the buff
1759 // array and still auto-init X in this example.
1760 // TODO: Improve the size filtering to filter by member size.
1761 auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
1762 switch (trivialAutoVarInit) {
1763 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1764 llvm_unreachable("Uninitialized handled by caller");
1765 case LangOptions::TrivialAutoVarInitKind::Zero:
1766 if (CGM.stopAutoInit())
1767 return;
1768 if (trivialAutoVarInitMaxSize > 0 &&
1769 allocSize > trivialAutoVarInitMaxSize)
1770 return;
1771 emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1772 break;
1773 case LangOptions::TrivialAutoVarInitKind::Pattern:
1774 if (CGM.stopAutoInit())
1775 return;
1776 if (trivialAutoVarInitMaxSize > 0 &&
1777 allocSize > trivialAutoVarInitMaxSize)
1778 return;
1779 emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1780 break;
1781 }
1782 return;
1783 }
1784
1785 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1786 // them, so emit a memcpy with the VLA size to initialize each element.
1787 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1788 // will catch that code, but there exists code which generates zero-sized
1789 // VLAs. Be nice and initialize whatever they requested.
1790 const auto *VlaType = getContext().getAsVariableArrayType(type);
1791 if (!VlaType)
1792 return;
1793 auto VlaSize = getVLASize(VlaType);
1794 auto SizeVal = VlaSize.NumElts;
1795 CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1796 switch (trivialAutoVarInit) {
1797 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1798 llvm_unreachable("Uninitialized handled by caller");
1799
1800 case LangOptions::TrivialAutoVarInitKind::Zero: {
1801 if (CGM.stopAutoInit())
1802 return;
1803 if (!EltSize.isOne())
1804 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1805 auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
1806 SizeVal, isVolatile);
1807 I->addAnnotationMetadata("auto-init");
1808 break;
1809 }
1810
1811 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1812 if (CGM.stopAutoInit())
1813 return;
1814 llvm::Type *ElTy = Loc.getElementType();
1815 llvm::Constant *Constant = constWithPadding(
1816 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1817 CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1818 llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1819 llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1820 llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1821 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1822 SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1823 "vla.iszerosized");
1824 Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1825 EmitBlock(SetupBB);
1826 if (!EltSize.isOne())
1827 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1828 llvm::Value *BaseSizeInChars =
1829 llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1830 Address Begin = Loc.withElementType(Int8Ty);
1831 llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
1832 Begin.emitRawPointer(*this),
1833 SizeVal, "vla.end");
1834 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1835 EmitBlock(LoopBB);
1836 llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1837 Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
1838 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1839 auto *I =
1840 Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
1841 createUnnamedGlobalForMemcpyFrom(
1842 CGM, D, Builder, Constant, ConstantAlign),
1843 BaseSizeInChars, isVolatile);
1844 I->addAnnotationMetadata("auto-init");
1845 llvm::Value *Next =
1846 Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1847 llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1848 Builder.CreateCondBr(Done, ContBB, LoopBB);
1849 Cur->addIncoming(Next, LoopBB);
1850 EmitBlock(ContBB);
1851 } break;
1852 }
1853}
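For example, with a VLA the size is only known at run time, so the zero mode collapses to a single memset over NumElts times the element size, while the pattern mode uses the vla-setup/vla-init loop emitted above (hypothetical snippet, assuming -ftrivial-auto-var-init=zero or =pattern):

    void h(unsigned n) {
      int vla[n];   // zero mode: one memset of n * sizeof(int) bytes
      (void)vla;    // pattern mode: per-element memcpy loop from a pattern constant
    }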
1854
1855void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1856 assert(emission.Variable && "emission was not valid!");
1857
1858 // If this was emitted as a global constant, we're done.
1859 if (emission.wasEmittedAsGlobal()) return;
1860
1861 const VarDecl &D = *emission.Variable;
1863 QualType type = D.getType();
1864
1865 // If this local has an initializer, emit it now.
1866 const Expr *Init = D.getInit();
1867
1868 // If we are at an unreachable point, we don't need to emit the initializer
1869 // unless it contains a label.
1870 if (!HaveInsertPoint()) {
1871 if (!Init || !ContainsLabel(Init)) return;
1872 EnsureInsertPoint();
1873 }
1874
1875 // Initialize the structure of a __block variable.
1876 if (emission.IsEscapingByRef)
1877 emitByrefStructureInit(emission);
1878
1879 // Initialize the variable here if it doesn't have an initializer and it is a
1880 // C struct that is non-trivial to initialize or an array containing such a
1881 // struct.
1882 if (!Init &&
1883 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1884 QualType::PDIK_Struct) {
1885 LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1886 if (emission.IsEscapingByRef)
1887 drillIntoBlockVariable(*this, Dst, &D);
1888 defaultInitNonTrivialCStructVar(Dst);
1889 return;
1890 }
1891
1892 // Check whether this is a byref variable that's potentially
1893 // captured and moved by its own initializer. If so, we'll need to
1894 // emit the initializer first, then copy into the variable.
1895 bool capturedByInit =
1896 Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1897
1898 bool locIsByrefHeader = !capturedByInit;
1899 const Address Loc =
1900 locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1901
1902 auto hasNoTrivialAutoVarInitAttr = [&](const Decl *D) {
1903 return D && D->hasAttr<NoTrivialAutoVarInitAttr>();
1904 };
1905 // Note: constexpr already initializes everything correctly.
1906 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1907 ((D.isConstexpr() || D.getAttr<UninitializedAttr>() ||
1908 hasNoTrivialAutoVarInitAttr(type->getAsTagDecl()) ||
1909 hasNoTrivialAutoVarInitAttr(CurFuncDecl))
1910 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1911 : getContext().getLangOpts().getTrivialAutoVarInit());
1912
1913 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1914 if (trivialAutoVarInit ==
1915 LangOptions::TrivialAutoVarInitKind::Uninitialized)
1916 return;
1917
1918 // Only initialize a __block's storage: we always initialize the header.
1919 if (emission.IsEscapingByRef && !locIsByrefHeader)
1920 Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1921
1922 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1923 };
1924
1925 if (isTrivialInitializer(Init))
1926 return initializeWhatIsTechnicallyUninitialized(Loc);
1927
1928 llvm::Constant *constant = nullptr;
1929 if (emission.IsConstantAggregate ||
1930 D.mightBeUsableInConstantExpressions(getContext())) {
1931 assert(!capturedByInit && "constant init contains a capturing block?");
1932 constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1933 if (constant && !constant->isZeroValue() &&
1934 (trivialAutoVarInit !=
1935 LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1936 IsPattern isPattern =
1937 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1938 ? IsPattern::Yes
1939 : IsPattern::No;
1940 // C guarantees that brace-init with fewer initializers than members in
1941 // the aggregate will initialize the rest of the aggregate as-if it were
1942 // static initialization. In turn static initialization guarantees that
1943 // padding is initialized to zero bits. We could instead pattern-init if D
1944 // has any ImplicitValueInitExpr, but that seems to be unintuitive
1945 // behavior.
1946 constant = constWithPadding(CGM, IsPattern::No,
1947 replaceUndef(CGM, isPattern, constant));
1948 }
1949
1950 if (constant && type->isBitIntType() &&
1951 CGM.getTypes().typeRequiresSplitIntoByteArray(type)) {
1952 // Constants for long _BitInt types are split into individual bytes.
1953 // Try to fold these back into an integer constant so it can be stored
1954 // properly.
1955 llvm::Type *LoadType =
1956 CGM.getTypes().convertTypeForLoadStore(type, constant->getType());
1957 constant = llvm::ConstantFoldLoadFromConst(
1958 constant, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
1959 }
1960 }
1961
1962 if (!constant) {
1963 if (trivialAutoVarInit !=
1964 LangOptions::TrivialAutoVarInitKind::Uninitialized) {
1965 // At this point, we know D has an Init expression, but isn't a constant.
1966 // - If D is not a scalar, auto-var-init conservatively (members may be
1967 // left uninitialized by constructor Init expressions for example).
1968 // - If D is a scalar, we only need to auto-var-init if there is a
1969 // self-reference. Otherwise, the Init expression should be sufficient.
1970 // It may be that the Init expression uses other uninitialized memory,
1971 // but auto-var-init here would not help, as auto-init would get
1972 // overwritten by Init.
1973 if (!type->isScalarType() || capturedByInit || isAccessedBy(D, Init)) {
1974 initializeWhatIsTechnicallyUninitialized(Loc);
1975 }
1976 }
1977 LValue lv = MakeAddrLValue(Loc, type);
1978 lv.setNonGC(true);
1979 return EmitExprAsInit(Init, &D, lv, capturedByInit);
1980 }
1981
1982 if (!emission.IsConstantAggregate) {
1983 // For simple scalar/complex initialization, store the value directly.
1984 LValue lv = MakeAddrLValue(Loc, type);
1985 lv.setNonGC(true);
1986 return EmitStoreThroughLValue(RValue::get(constant), lv, true);
1987 }
1988
1989 emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
1990 type.isVolatileQualified(), Builder, constant,
1991 /*IsAutoInit=*/false);
1992}
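A local whose initializer folds to a constant aggregate typically ends up in the emitStoresForConstant path, i.e. a memcpy from an unnamed constant global or a memset/bzero plus a few scalar stores, depending on the heuristics referenced earlier in this file; a minimal sketch:

    struct Table { int ids[8]; };
    void k() {
      struct Table t = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
      (void)t;   // constant aggregate initializer
    }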
1993
1994/// Emit an expression as an initializer for an object (variable, field, etc.)
1995/// at the given location. The expression is not necessarily the normal
1996/// initializer for the object, and the address is not necessarily
1997/// its normal location.
1998///
1999/// \param init the initializing expression
2000/// \param D the object to act as if we're initializing
2001/// \param lvalue the lvalue to initialize
2002/// \param capturedByInit true if \p D is a __block variable
2003/// whose address is potentially changed by the initializer
2004void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
2005 LValue lvalue, bool capturedByInit) {
2006 QualType type = D->getType();
2007
2008 if (type->isReferenceType()) {
2009 RValue rvalue = EmitReferenceBindingToExpr(init);
2010 if (capturedByInit)
2011 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2012 EmitStoreThroughLValue(rvalue, lvalue, true);
2013 return;
2014 }
2015 switch (getEvaluationKind(type)) {
2016 case TEK_Scalar:
2017 EmitScalarInit(init, D, lvalue, capturedByInit);
2018 return;
2019 case TEK_Complex: {
2020 ComplexPairTy complex = EmitComplexExpr(init);
2021 if (capturedByInit)
2022 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2023 EmitStoreOfComplex(complex, lvalue, /*init*/ true);
2024 return;
2025 }
2026 case TEK_Aggregate:
2027 if (type->isAtomicType()) {
2028 EmitAtomicInit(const_cast<Expr*>(init), lvalue);
2029 } else {
2030 AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
2031 if (isa<VarDecl>(D))
2032 Overlap = AggValueSlot::DoesNotOverlap;
2033 else if (auto *FD = dyn_cast<FieldDecl>(D))
2034 Overlap = getOverlapForFieldInit(FD);
2035 // TODO: how can we delay here if D is captured by its initializer?
2036 EmitAggExpr(init,
2037 AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
2038 AggValueSlot::DoesNotNeedGCBarriers,
2039 AggValueSlot::IsNotAliased, Overlap));
2040 }
2041 return;
2042 }
2043 llvm_unreachable("bad evaluation kind");
2044}
2045
2046/// Enter a destroy cleanup for the given local variable.
2047 void CodeGenFunction::emitAutoVarTypeCleanup(
2048 const CodeGenFunction::AutoVarEmission &emission,
2049 QualType::DestructionKind dtorKind) {
2050 assert(dtorKind != QualType::DK_none);
2051
2052 // Note that for __block variables, we want to destroy the
2053 // original stack object, not the possibly forwarded object.
2054 Address addr = emission.getObjectAddress(*this);
2055
2056 const VarDecl *var = emission.Variable;
2057 QualType type = var->getType();
2058
2059 CleanupKind cleanupKind = NormalAndEHCleanup;
2060 CodeGenFunction::Destroyer *destroyer = nullptr;
2061
2062 switch (dtorKind) {
2063 case QualType::DK_none:
2064 llvm_unreachable("no cleanup for trivially-destructible variable");
2065
2066 case QualType::DK_cxx_destructor:
2067 // If there's an NRVO flag on the emission, we need a different
2068 // cleanup.
2069 if (emission.NRVOFlag) {
2070 assert(!type->isArrayType());
2071 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
2072 EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
2073 emission.NRVOFlag);
2074 return;
2075 }
2076 break;
2077
2078 case QualType::DK_objc_strong_lifetime:
2079 // Suppress cleanups for pseudo-strong variables.
2080 if (var->isARCPseudoStrong()) return;
2081
2082 // Otherwise, consider whether to use an EH cleanup or not.
2083 cleanupKind = getARCCleanupKind();
2084
2085 // Use the imprecise destroyer by default.
2086 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2087 destroyer = CodeGenFunction::destroyARCStrongImprecise;
2088 break;
2089
2090 case QualType::DK_objc_weak_lifetime:
2091 break;
2092
2093 case QualType::DK_nontrivial_c_struct:
2094 destroyer = CodeGenFunction::destroyNonTrivialCStruct;
2095 if (emission.NRVOFlag) {
2096 assert(!type->isArrayType());
2097 EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
2098 emission.NRVOFlag, type);
2099 return;
2100 }
2101 break;
2102 }
2103
2104 // If we haven't chosen a more specific destroyer, use the default.
2105 if (!destroyer) destroyer = getDestroyer(dtorKind);
2106
2107 // Use an EH cleanup in array destructors iff the destructor itself
2108 // is being pushed as an EH cleanup.
2109 bool useEHCleanup = (cleanupKind & EHCleanup);
2110 EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
2111 useEHCleanup);
2112}
2113
2114void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2115 assert(emission.Variable && "emission was not valid!");
2116
2117 // If this was emitted as a global constant, we're done.
2118 if (emission.wasEmittedAsGlobal()) return;
2119
2120 // If we don't have an insertion point, we're done. Sema prevents
2121 // us from jumping into any of these scopes anyway.
2122 if (!HaveInsertPoint()) return;
2123
2124 const VarDecl &D = *emission.Variable;
2125
2126 // Check the type for a cleanup.
2127 if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
2128 emitAutoVarTypeCleanup(emission, dtorKind);
2129
2130 // In GC mode, honor objc_precise_lifetime.
2131 if (getLangOpts().getGC() != LangOptions::NonGC &&
2132 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2133 EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2134 }
2135
2136 // Handle the cleanup attribute.
2137 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2138 const FunctionDecl *FD = CA->getFunctionDecl();
2139
2140 llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2141 assert(F && "Could not find function!");
2142
2143 const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2144 EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2145 }
2146
2147 // If this is a block variable, call _Block_object_destroy
2148 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2149 // mode.
2150 if (emission.IsEscapingByRef &&
2151 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2152 BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2153 if (emission.Variable->getType().isObjCGCWeak())
2154 Flags |= BLOCK_FIELD_IS_WEAK;
2155 enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2156 /*LoadBlockVarAddr*/ false,
2157 cxxDestructorCanThrow(emission.Variable->getType()));
2158 }
2159}
2160
2161 CodeGenFunction::Destroyer *
2162 CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2163 switch (kind) {
2164 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2165 case QualType::DK_cxx_destructor:
2166 return destroyCXXObject;
2167 case QualType::DK_objc_strong_lifetime:
2168 return destroyARCStrongPrecise;
2169 case QualType::DK_objc_weak_lifetime:
2170 return destroyARCWeak;
2171 case QualType::DK_nontrivial_c_struct:
2172 return destroyNonTrivialCStruct;
2173 }
2174 llvm_unreachable("Unknown DestructionKind");
2175}
2176
2177/// pushEHDestroy - Push the standard destructor for the given type as
2178/// an EH-only cleanup.
2179 void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2180 Address addr, QualType type) {
2181 assert(dtorKind && "cannot push destructor for trivial type");
2182 assert(needsEHCleanup(dtorKind));
2183
2184 pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2185}
2186
2187/// pushDestroy - Push the standard destructor for the given type as
2188/// at least a normal cleanup.
2189 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2190 Address addr, QualType type) {
2191 assert(dtorKind && "cannot push destructor for trivial type");
2192
2193 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2194 pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2195 cleanupKind & EHCleanup);
2196}
2197
2198void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2199 QualType type, Destroyer *destroyer,
2200 bool useEHCleanupForArray) {
2201 pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2202 destroyer, useEHCleanupForArray);
2203}
2204
2205// Pushes a destroy and defers its deactivation until its
2206// CleanupDeactivationScope is exited.
2207 void CodeGenFunction::pushDestroyAndDeferDeactivation(
2208 QualType::DestructionKind dtorKind, Address addr, QualType type) {
2209 assert(dtorKind && "cannot push destructor for trivial type");
2210
2211 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2212 pushDestroyAndDeferDeactivation(
2213 cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
2214}
2215
2216 void CodeGenFunction::pushDestroyAndDeferDeactivation(
2217 CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
2218 bool useEHCleanupForArray) {
2219 llvm::Instruction *DominatingIP =
2220 Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
2221 pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2222 DeferredDeactivationCleanupStack.push_back(
2223 {EHStack.stable_begin(), DominatingIP});
2224}
2225
2226 void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2227 EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2228}
2229
2230 void CodeGenFunction::pushKmpcAllocFree(
2231 CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
2232 EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
2233}
2234
2235 void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
2236 Address addr, QualType type,
2237 Destroyer *destroyer,
2238 bool useEHCleanupForArray) {
2239 // If we're not in a conditional branch, we don't need to bother generating a
2240 // conditional cleanup.
2241 if (!isInConditionalBranch()) {
2242 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2243 // around in case a temporary's destructor throws an exception.
2244
2245 // Add the cleanup to the EHStack. After the full-expr, this would be
2246 // deactivated before being popped from the stack.
2247 pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
2248 useEHCleanupForArray);
2249
2250 // Since this is lifetime-extended, push it once again to the EHStack after
2251 // the full expression.
2252 return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2253 cleanupKind, Address::invalid(), addr, type, destroyer,
2254 useEHCleanupForArray);
2255 }
2256
2257 // Otherwise, we should only destroy the object if it's been initialized.
2258
2259 using ConditionalCleanupType =
2260 EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
2261 Destroyer *, bool>;
2262 DominatingValue<Address>::saved_type SavedAddr = saveValueInCond(addr);
2263
2264 // Remember to emit cleanup if we branch-out before end of full-expression
2265 // (eg: through stmt-expr or coro suspensions).
2266 AllocaTrackerRAII DeactivationAllocas(*this);
2267 Address ActiveFlagForDeactivation = createCleanupActiveFlag();
2268
2269 pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
2270 cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
2271 initFullExprCleanupWithFlag(ActiveFlagForDeactivation);
2272 EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
2273 // Erase the active flag if the cleanup was not emitted.
2274 cleanup.AddAuxAllocas(std::move(DeactivationAllocas).Take());
2275
2276 // Since this is lifetime-extended, push it once again to the EHStack after
2277 // the full expression.
2278 // The previous active flag would always be 'false' due to forced deferred
2279 // deactivation. Use a separate flag for lifetime-extension to correctly
2280 // remember if this branch was taken and the object was initialized.
2281 Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
2282 pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2283 cleanupKind, ActiveFlagForLifetimeExt, SavedAddr, type, destroyer,
2284 useEHCleanupForArray);
2285}
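Lifetime-extended destroys arise, for instance, when a temporary created in a conditionally evaluated subexpression ends up bound to a longer-lived reference; a sketch of such a case:

    struct Guard { Guard(); ~Guard(); };
    void m(bool cond) {
      const Guard &g = cond ? Guard() : Guard();  // each arm's temporary is created
      (void)g;                                    // conditionally but must live until
    }                                             // the end of the enclosing scope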
2286
2287/// emitDestroy - Immediately perform the destruction of the given
2288/// object.
2289///
2290/// \param addr - the address of the object; a type*
2291/// \param type - the type of the object; if an array type, all
2292/// objects are destroyed in reverse order
2293/// \param destroyer - the function to call to destroy individual
2294/// elements
2295/// \param useEHCleanupForArray - whether an EH cleanup should be
2296/// used when destroying array elements, in case one of the
2297/// destructions throws an exception
2298 void CodeGenFunction::emitDestroy(Address addr, QualType type,
2299 Destroyer *destroyer,
2300 bool useEHCleanupForArray) {
2301 const ArrayType *arrayType = getContext().getAsArrayType(type);
2302 if (!arrayType)
2303 return destroyer(*this, addr, type);
2304
2305 llvm::Value *length = emitArrayLength(arrayType, type, addr);
2306
2307 CharUnits elementAlign =
2308 addr.getAlignment()
2309 .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2310
2311 // Normally we have to check whether the array is zero-length.
2312 bool checkZeroLength = true;
2313
2314 // But if the array length is constant, we can suppress that.
2315 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2316 // ...and if it's constant zero, we can just skip the entire thing.
2317 if (constLength->isZero()) return;
2318 checkZeroLength = false;
2319 }
2320
2321 llvm::Value *begin = addr.emitRawPointer(*this);
2322 llvm::Value *end =
2323 Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
2324 emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2325 checkZeroLength, useEHCleanupForArray);
2326}
2327
2328/// emitArrayDestroy - Destroys all the elements of the given array,
2329/// beginning from last to first. The array cannot be zero-length.
2330///
2331/// \param begin - a type* denoting the first element of the array
2332/// \param end - a type* denoting one past the end of the array
2333/// \param elementType - the element type of the array
2334/// \param destroyer - the function to call to destroy elements
2335/// \param useEHCleanup - whether to push an EH cleanup to destroy
2336/// the remaining elements in case the destruction of a single
2337/// element throws
2338void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2339 llvm::Value *end,
2340 QualType elementType,
2341 CharUnits elementAlign,
2342 Destroyer *destroyer,
2343 bool checkZeroLength,
2344 bool useEHCleanup) {
2345 assert(!elementType->isArrayType());
2346
2347 // The basic structure here is a do-while loop, because we don't
2348 // need to check for the zero-element case.
2349 llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2350 llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2351
2352 if (checkZeroLength) {
2353 llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2354 "arraydestroy.isempty");
2355 Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2356 }
2357
2358 // Enter the loop body, making that address the current address.
2359 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2360 EmitBlock(bodyBB);
2361 llvm::PHINode *elementPast =
2362 Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2363 elementPast->addIncoming(end, entryBB);
2364
2365 // Shift the address back by one element.
2366 llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2367 llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
2368 llvm::Value *element = Builder.CreateInBoundsGEP(
2369 llvmElementType, elementPast, negativeOne, "arraydestroy.element");
2370
2371 if (useEHCleanup)
2372 pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2373 destroyer);
2374
2375 // Perform the actual destruction there.
2376 destroyer(*this, Address(element, llvmElementType, elementAlign),
2377 elementType);
2378
2379 if (useEHCleanup)
2380 PopCleanupBlock();
2381
2382 // Check whether we've reached the end.
2383 llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2384 Builder.CreateCondBr(done, doneBB, bodyBB);
2385 elementPast->addIncoming(element, Builder.GetInsertBlock());
2386
2387 // Done.
2388 EmitBlock(doneBB);
2389}
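Concretely, an automatic array of objects with non-trivial destructors is torn down by this loop, last element first; for instance:

    struct File { ~File(); };
    void p() {
      File files[4];   // on scope exit, the arraydestroy loop runs ~File() on
    }                  // files[3], files[2], files[1], files[0], in that order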
2390
2391/// Perform partial array destruction as if in an EH cleanup. Unlike
2392/// emitArrayDestroy, the element type here may still be an array type.
2393 static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2394 llvm::Value *begin, llvm::Value *end,
2395 QualType type, CharUnits elementAlign,
2396 CodeGenFunction::Destroyer *destroyer) {
2397 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2398
2399 // If the element type is itself an array, drill down.
2400 unsigned arrayDepth = 0;
2401 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2402 // VLAs don't require a GEP index to walk into.
2403 if (!isa<VariableArrayType>(arrayType))
2404 arrayDepth++;
2405 type = arrayType->getElementType();
2406 }
2407
2408 if (arrayDepth) {
2409 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2410
2411 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2412 begin = CGF.Builder.CreateInBoundsGEP(
2413 elemTy, begin, gepIndices, "pad.arraybegin");
2414 end = CGF.Builder.CreateInBoundsGEP(
2415 elemTy, end, gepIndices, "pad.arrayend");
2416 }
2417
2418 // Destroy the array. We don't ever need an EH cleanup because we
2419 // assume that we're in an EH cleanup ourselves, so a throwing
2420 // destructor causes an immediate terminate.
2421 CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2422 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2423}
2424
2425namespace {
2426 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2427 /// array destroy where the end pointer is regularly determined and
2428 /// does not need to be loaded from a local.
2429 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2430 llvm::Value *ArrayBegin;
2431 llvm::Value *ArrayEnd;
2432 QualType ElementType;
2433 CodeGenFunction::Destroyer *Destroyer;
2434 CharUnits ElementAlign;
2435 public:
2436 RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2437 QualType elementType, CharUnits elementAlign,
2438 CodeGenFunction::Destroyer *destroyer)
2439 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2440 ElementType(elementType), Destroyer(destroyer),
2441 ElementAlign(elementAlign) {}
2442
2443 void Emit(CodeGenFunction &CGF, Flags flags) override {
2444 emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2445 ElementType, ElementAlign, Destroyer);
2446 }
2447 };
2448
2449 /// IrregularPartialArrayDestroy - a cleanup which performs a
2450 /// partial array destroy where the end pointer is irregularly
2451 /// determined and must be loaded from a local.
2452 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2453 llvm::Value *ArrayBegin;
2454 Address ArrayEndPointer;
2455 QualType ElementType;
2456 CodeGenFunction::Destroyer *Destroyer;
2457 CharUnits ElementAlign;
2458 public:
2459 IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2460 Address arrayEndPointer,
2461 QualType elementType,
2462 CharUnits elementAlign,
2463 CodeGenFunction::Destroyer *destroyer)
2464 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2465 ElementType(elementType), Destroyer(destroyer),
2466 ElementAlign(elementAlign) {}
2467
2468 void Emit(CodeGenFunction &CGF, Flags flags) override {
2469 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2470 emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2471 ElementType, ElementAlign, Destroyer);
2472 }
2473 };
2474} // end anonymous namespace
2475
2476/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
2477/// destroy already-constructed elements of the given array. The cleanup may be
2478/// popped with DeactivateCleanupBlock or PopCleanupBlock.
2479///
2480/// \param elementType - the immediate element type of the array;
2481/// possibly still an array type
2482void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2483 Address arrayEndPointer,
2484 QualType elementType,
2485 CharUnits elementAlign,
2486 Destroyer *destroyer) {
2487 pushFullExprCleanup<IrregularPartialArrayDestroy>(
2488 NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
2489 elementAlign, destroyer);
2490}
2491
2492/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2493/// already-constructed elements of the given array. The cleanup
2494/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2495///
2496/// \param elementType - the immediate element type of the array;
2497/// possibly still an array type
2498void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2499 llvm::Value *arrayEnd,
2500 QualType elementType,
2501 CharUnits elementAlign,
2502 Destroyer *destroyer) {
2503 pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2504 arrayBegin, arrayEnd,
2505 elementType, elementAlign,
2506 destroyer);
2507}
2508
2509/// Lazily declare the @llvm.lifetime.start intrinsic.
2510 llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2511 if (LifetimeStartFn)
2512 return LifetimeStartFn;
2513 LifetimeStartFn = llvm::Intrinsic::getOrInsertDeclaration(
2514 &getModule(), llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2515 return LifetimeStartFn;
2516}
2517
2518/// Lazily declare the @llvm.lifetime.end intrinsic.
2519 llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2520 if (LifetimeEndFn)
2521 return LifetimeEndFn;
2522 LifetimeEndFn = llvm::Intrinsic::getOrInsertDeclaration(
2523 &getModule(), llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2524 return LifetimeEndFn;
2525}
2526
2527namespace {
2528 /// A cleanup to perform a release of an object at the end of a
2529 /// function. This is used to balance out the incoming +1 of a
2530 /// ns_consumed argument when we can't reasonably do that just by
2531 /// not doing the initial retain for a __block argument.
2532 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2533 ConsumeARCParameter(llvm::Value *param,
2534 ARCPreciseLifetime_t precise)
2535 : Param(param), Precise(precise) {}
2536
2537 llvm::Value *Param;
2538 ARCPreciseLifetime_t Precise;
2539
2540 void Emit(CodeGenFunction &CGF, Flags flags) override {
2541 CGF.EmitARCRelease(Param, Precise);
2542 }
2543 };
2544} // end anonymous namespace
2545
2546/// Emit an alloca (or GlobalValue depending on target)
2547/// for the specified parameter and set up LocalDeclMap.
2548void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2549 unsigned ArgNo) {
2550 bool NoDebugInfo = false;
2551 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2552 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2553 "Invalid argument to EmitParmDecl");
2554
2555 // Set the name of the parameter's initial value to make IR easier to
2556 // read. Don't modify the names of globals.
2557 if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
2558 Arg.getAnyValue()->setName(D.getName());
2559
2560 QualType Ty = D.getType();
2561
2562 // Use better IR generation for certain implicit parameters.
2563 if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2564 // The only implicit argument a block has is its literal.
2565 // This may be passed as an inalloca'ed value on Windows x86.
2566 if (BlockInfo) {
2567 llvm::Value *V = Arg.isIndirect()
2568 ? Builder.CreateLoad(Arg.getIndirectAddress())
2569 : Arg.getDirectValue();
2570 setBlockContextParameter(IPD, ArgNo, V);
2571 return;
2572 }
2573 // Suppressing debug info for ThreadPrivateVar parameters, else it hides
2574 // debug info of TLS variables.
2575 NoDebugInfo =
2576 (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
2577 }
2578
2579 Address DeclPtr = Address::invalid();
2580 RawAddress AllocaPtr = Address::invalid();
2581 bool DoStore = false;
2582 bool IsScalar = hasScalarEvaluationKind(Ty);
2583 bool UseIndirectDebugAddress = false;
2584
2585 // If we already have a pointer to the argument, reuse the input pointer.
2586 if (Arg.isIndirect()) {
2587 DeclPtr = Arg.getIndirectAddress();
2588 DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
2589 // Indirect argument is in alloca address space, which may be different
2590 // from the default address space.
2591 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2592 auto *V = DeclPtr.emitRawPointer(*this);
2593 AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());
2594
2595 // For truly ABI indirect arguments -- those that are not `byval` -- store
2596 // the address of the argument on the stack to preserve debug information.
2597 ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
2598 if (ArgInfo.isIndirect())
2599 UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
2600 if (UseIndirectDebugAddress) {
2601 auto PtrTy = getContext().getPointerType(Ty);
2602 AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
2603 D.getName() + ".indirect_addr");
2604 EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
2605 }
2606
2607 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2608 auto DestLangAS =
2609 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2610 if (SrcLangAS != DestLangAS) {
2611 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2612 CGM.getDataLayout().getAllocaAddrSpace());
2613 auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2614 auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
2615 DeclPtr =
2616 DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
2617 *this, V, SrcLangAS, DestLangAS, T, true),
2618 DeclPtr.isKnownNonNull());
2619 }
2620
2621 // Push a destructor cleanup for this parameter if the ABI requires it.
2622 // Don't push a cleanup in a thunk for a method that will also emit a
2623 // cleanup.
2624 if (Ty->isRecordType() && !CurFuncIsThunk &&
2625 Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2626 if (QualType::DestructionKind DtorKind =
2627 D.needsDestruction(getContext())) {
2628 assert((DtorKind == QualType::DK_cxx_destructor ||
2629 DtorKind == QualType::DK_nontrivial_c_struct) &&
2630 "unexpected destructor type");
2631 pushDestroy(DtorKind, DeclPtr, Ty);
2632 CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2633 EHStack.stable_begin();
2634 }
2635 }
2636 } else {
2637 // Check if the parameter address is controlled by OpenMP runtime.
2638 Address OpenMPLocalAddr =
2639 getLangOpts().OpenMP
2640 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
2641 : Address::invalid();
2642 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2643 DeclPtr = OpenMPLocalAddr;
2644 AllocaPtr = DeclPtr;
2645 } else {
2646 // Otherwise, create a temporary to hold the value.
2647 DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2648 D.getName() + ".addr", &AllocaPtr);
2649 }
2650 DoStore = true;
2651 }
2652
2653 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2654
2655 LValue lv = MakeAddrLValue(DeclPtr, Ty);
2656 if (IsScalar) {
2657 Qualifiers qs = Ty.getQualifiers();
2658 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2659 // We honor __attribute__((ns_consumed)) for types with lifetime.
2660 // For __strong, it's handled by just skipping the initial retain;
2661 // otherwise we have to balance out the initial +1 with an extra
2662 // cleanup to do the release at the end of the function.
2663 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2664
2665 // If a parameter is pseudo-strong then we can omit the implicit retain.
2666 if (D.isARCPseudoStrong()) {
2667 assert(lt == Qualifiers::OCL_Strong &&
2668 "pseudo-strong variable isn't strong?");
2669 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2670 lt = Qualifiers::OCL_ExplicitNone;
2671 }
2672
2673 // Load objects passed indirectly.
2674 if (Arg.isIndirect() && !ArgVal)
2675 ArgVal = Builder.CreateLoad(DeclPtr);
2676
2677 if (lt == Qualifiers::OCL_Strong) {
2678 if (!isConsumed) {
2679 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2680 // use objc_storeStrong(&dest, value) for retaining the
2681 // object. But first, store a null into 'dest' because
2682 // objc_storeStrong attempts to release its old value.
2683 llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2684 EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2685 EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
2686 DoStore = false;
2687 }
2688 else
2689 // Don't use objc_retainBlock for block pointers, because we
2690 // don't want to Block_copy something just because we got it
2691 // as a parameter.
2692 ArgVal = EmitARCRetainNonBlock(ArgVal);
2693 }
2694 } else {
2695 // Push the cleanup for a consumed parameter.
2696 if (isConsumed) {
2697 ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2698 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2699 EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2700 precise);
2701 }
2702
2703 if (lt == Qualifiers::OCL_Weak) {
2704 EmitARCInitWeak(DeclPtr, ArgVal);
2705 DoStore = false; // The weak init is a store, no need to do two.
2706 }
2707 }
2708
2709 // Enter the cleanup scope.
2710 EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2711 }
2712 }
2713
2714 // Store the initial value into the alloca.
2715 if (DoStore)
2716 EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2717
2718 setAddrOfLocalVar(&D, DeclPtr);
2719
2720 // Emit debug info for param declarations in non-thunk functions.
2721 if (CGDebugInfo *DI = getDebugInfo()) {
2722 if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
2723 !NoDebugInfo) {
2724 llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
2725 &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
2726 if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
2727 DI->getParamDbgMappings().insert({Var, DILocalVar});
2728 }
2729 }
2730
2731 if (D.hasAttr<AnnotateAttr>())
2732 EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));
2733
2734 // We can only check return value nullability if all arguments to the
2735 // function satisfy their nullability preconditions. This makes it necessary
2736 // to emit null checks for args in the function body itself.
2737 if (requiresReturnValueNullabilityCheck()) {
2738 auto Nullability = Ty->getNullability();
2739 if (Nullability && *Nullability == NullabilityKind::NonNull) {
2740 SanitizerScope SanScope(this);
2741 RetValNullabilityPrecondition =
2742 Builder.CreateAnd(RetValNullabilityPrecondition,
2743 Builder.CreateIsNotNull(Arg.getAnyValue()));
2744 }
2745 }
2746}
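As one example of the parameter cleanups above, a by-value parameter of class type under an ABI where the callee destroys its arguments (e.g. the Microsoft C++ ABI) gets a destructor cleanup pushed in EmitParmDecl (hypothetical snippet):

    struct Payload { Payload(const Payload &); ~Payload(); int v; };
    int read(Payload p) {   // when the ABI says the parameter is destroyed in the
      return p.v;           // callee, a destructor cleanup for 'p' is pushed here
    }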
2747
2748 void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2749 CodeGenFunction *CGF) {
2750 if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2751 return;
2752 getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2753}
2754
2755 void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2756 CodeGenFunction *CGF) {
2757 if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2758 (!LangOpts.EmitAllDecls && !D->isUsed()))
2759 return;
2760 getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2761}
2762
2763 void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2764 getOpenMPRuntime().processRequiresDirective(D);
2765}
2766
2767 void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
2768 for (const Expr *E : D->varlist()) {
2769 const auto *DE = cast<DeclRefExpr>(E);
2770 const auto *VD = cast<VarDecl>(DE->getDecl());
2771
2772 // Skip all but globals.
2773 if (!VD->hasGlobalStorage())
2774 continue;
2775
2776 // Check if the global has been materialized yet or not. If not, we are done
2777 // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2778 // we already emitted the global we might have done so before the
2779 // OMPAllocateDeclAttr was attached, leading to the wrong address space
2780 (potentially). While not pretty, common practice is to remove the old IR
2781 // global and generate a new one, so we do that here too. Uses are replaced
2782 // properly.
2783 StringRef MangledName = getMangledName(VD);
2784 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
2785 if (!Entry)
2786 continue;
2787
2788 // We can also keep the existing global if the address space is what we
2789 // expect it to be, if not, it is replaced.
2790 QualType ASTTy = VD->getType();
2791 clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
2792 auto TargetAS = getContext().getTargetAddressSpace(GVAS);
2793 if (Entry->getType()->getAddressSpace() == TargetAS)
2794 continue;
2795
2796 // Make a new global with the correct type / address space.
2797 llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
2798 llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);
2799
2800 // Replace all uses of the old global with a cast. Since we mutate the type
2801 // in place we need an intermediate that takes the spot of the old entry
2802 // until we can create the cast.
2803 llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
2804 getModule(), Entry->getValueType(), false,
2805 llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
2806 llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
2807 Entry->replaceAllUsesWith(DummyGV);
2808
2809 Entry->mutateType(PTy);
2810 llvm::Constant *NewPtrForOldDecl =
2811 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2812 Entry, DummyGV->getType());
2813
2814 // Now we have a casted version of the changed global, the dummy can be
2815 // replaced and deleted.
2816 DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
2817 DummyGV->eraseFromParent();
2818 }
2819}
2820
2821std::optional<CharUnits>
2822 CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
2823 if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
2824 if (Expr *Alignment = AA->getAlignment()) {
2825 unsigned UserAlign =
2826 Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
2827 CharUnits NaturalAlign =
2828 getNaturalTypeAlignment(VD->getType().getNonReferenceType());
2829
2830 // OpenMP5.1 pg 185 lines 7-10
2831 // Each item in the align modifier list must be aligned to the maximum
2832 // of the specified alignment and the type's natural alignment.
2833 return CharUnits::fromQuantity(
2834 std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
2835 }
2836 }
2837 return std::nullopt;
2838}
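For instance, an allocate directive with an align modifier yields the larger of the requested alignment and the type's natural alignment (sketch, assuming OpenMP 5.1 align support):

    double buffer[256];
    #pragma omp allocate(buffer) align(64)
    // resulting alignment: max(64, alignof(double)) = 64 bytes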
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3443
static void emitStoresForInitAfterBZero(CodeGenModule &CGM, llvm::Constant *Init, Address Loc, bool isVolatile, CGBuilderTy &Builder, bool IsAutoInit)
For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit the scalar stores that woul...
Definition: CGDecl.cpp:923
static bool isCapturedBy(const VarDecl &, const Expr *)
Determines whether the given __block variable is potentially captured by the given expression.
Definition: CGDecl.cpp:1684
static void emitPartialArrayDestroy(CodeGenFunction &CGF, llvm::Value *begin, llvm::Value *end, QualType type, CharUnits elementAlign, CodeGenFunction::Destroyer *destroyer)
Perform partial array destruction as if in an EH cleanup.
Definition: CGDecl.cpp:2393
static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D, Address Loc, bool isVolatile, CGBuilderTy &Builder)
Definition: CGDecl.cpp:1274
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init, unsigned &NumStores)
Decide whether we can emit the non-zero parts of the specified initializer with equal or fewer than N...
Definition: CGDecl.cpp:885
static llvm::Constant * patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern, llvm::Type *Ty)
Generate a constant filled with either a pattern or zeroes.
Definition: CGDecl.cpp:1017
static llvm::Constant * constWithPadding(CodeGenModule &CGM, IsPattern isPattern, llvm::Constant *constant)
Replace all padding bytes in a given constant with either a pattern byte or 0x00.
Definition: CGDecl.cpp:1069
static llvm::Value * shouldUseMemSetToInitialize(llvm::Constant *Init, uint64_t GlobalSize, const llvm::DataLayout &DL)
Decide whether we should use memset to initialize a local variable instead of using a memcpy from a c...
Definition: CGDecl.cpp:991
IsPattern
Definition: CGDecl.cpp:1014
static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D)
Definition: CGDecl.cpp:222
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D, Address Loc, bool isVolatile, CGBuilderTy &Builder, llvm::Constant *constant, bool IsAutoInit)
Definition: CGDecl.cpp:1164
static bool shouldSplitConstantStore(CodeGenModule &CGM, uint64_t GlobalByteSize)
Decide whether we want to split a constant structure or array store into a sequence of its fields' st...
Definition: CGDecl.cpp:1003
static llvm::Constant * replaceUndef(CodeGenModule &CGM, IsPattern isPattern, llvm::Constant *constant)
Definition: CGDecl.cpp:1296
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF, const LValue &destLV, const Expr *init)
Definition: CGDecl.cpp:691
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init, uint64_t GlobalSize)
Decide whether we should use bzero plus some stores to initialize a local variable instead of using a...
Definition: CGDecl.cpp:970
static llvm::Constant * constStructWithPadding(CodeGenModule &CGM, IsPattern isPattern, llvm::StructType *STy, llvm::Constant *constant)
Helper function for constWithPadding() to deal with padding in structures.
Definition: CGDecl.cpp:1029
static bool containsUndef(llvm::Constant *constant)
Definition: CGDecl.cpp:1285
static bool isAccessedBy(const VarDecl &var, const Stmt *s)
Definition: CGDecl.cpp:659
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var, Address addr, Qualifiers::ObjCLifetime lifetime)
EmitAutoVarWithLifetime - Does the setup required for an automatic variable with lifetime.
Definition: CGDecl.cpp:623
static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM, const VarDecl &D, CGBuilderTy &Builder, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1155
static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D, Address Loc, bool isVolatile, CGBuilderTy &Builder)
Definition: CGDecl.cpp:1264
static void drillIntoBlockVariable(CodeGenFunction &CGF, LValue &lvalue, const VarDecl *var)
Definition: CGDecl.cpp:740
CodeGenFunction::ComplexPairTy ComplexPairTy
const Decl * D
Expr * E
This file defines OpenMP nodes for declarative directives.
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
static const NamedDecl * getDefinition(const Decl *D)
Definition: SemaDecl.cpp:2890
SourceLocation Loc
Definition: SemaObjC.cpp:759
SourceLocation Begin
__device__ __2f16 float __ockl_bool s
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
IdentifierTable & Idents
Definition: ASTContext.h:680
const LangOptions & getLangOpts() const
Definition: ASTContext.h:834
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2918
unsigned getTargetAddressSpace(LangAS AS) const
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
Represents a block literal declaration, which is like an unnamed FunctionDecl.
Definition: Decl.h:4474
ArrayRef< Capture > captures() const
Definition: Decl.h:4601
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6414
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2553
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1375
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2817
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:259
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:231
bool isValid() const
Definition: Address.h:177
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:903
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:398
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
Definition: CGBuilder.h:158
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:365
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:346
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl)
Emit information about a global variable.
Param2DILocTy & getParamDbgMappings()
Definition: CGDebugInfo.h:620
llvm::DILocalVariable * EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI, unsigned ArgNo, CGBuilderTy &Builder, bool UsePointerValue=false)
Emit call to llvm.dbg.declare for an argument variable declaration.
llvm::DILocalVariable * EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI, CGBuilderTy &Builder, const bool UsePointerValue=false)
Emit call to llvm.dbg.declare for an automatic variable declaration.
void setLocation(SourceLocation Loc)
Update the current source location.
void registerVLASizeExpression(QualType Ty, llvm::Metadata *SizeExpr)
Register VLA size expression debug node with the qualified type.
Definition: CGDebugInfo.h:425
CGFunctionInfo - Class to encapsulate the information about a function definition.
const_arg_iterator arg_begin() const
MutableArrayRef< ArgInfo > arguments()
virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF, const VarDecl &D)
Emit the IR required for a work-group-local variable declaration, and add an entry to CGF's LocalDecl...
Allows to disable automatic handling of functions used in target regions as those marked as omp decla...
virtual void getKmpcFreeShared(CodeGenFunction &CGF, const std::pair< llvm::Value *, llvm::Value * > &AddrSizePair)
Get call to __kmpc_free_shared.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF=nullptr)
Emit the function for the user defined mapper construct.
virtual void processRequiresDirective(const OMPRequiresDecl *D)
Perform check on requires decl to ensure that target architecture supports unified addressing.
virtual std::pair< llvm::Value *, llvm::Value * > getKmpcAllocShared(CodeGenFunction &CGF, const VarDecl *VD)
Get call to __kmpc_alloc_shared.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D)
Emit code for the specified user defined reduction construct.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:305
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void emitAutoVarTypeCleanup(const AutoVarEmission &emission, QualType::DestructionKind dtorKind)
void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags, bool LoadBlockVarAddr, bool CanThrow)
Enter a cleanup to destroy a __block variable.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
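A sketch of how the lifetime markers are typically paired (Alloca and AllocaSize are assumed; EmitLifetimeStart returns null when marker emission is disabled, so the end marker is guarded):
  llvm::Value *SizeV = CGF.EmitLifetimeStart(AllocaSize, Alloca);
  // ... emit the uses of the alloca ...
  if (SizeV)
    CGF.EmitLifetimeEnd(SizeV, Alloca);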
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label.
static Destroyer destroyNonTrivialCStruct
static bool cxxDestructorCanThrow(QualType T)
Check if T is a C++ class that has a destructor that can throw.
SanitizerSet SanOpts
Sanitizers enabled for this function.
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
void EmitARCMoveWeak(Address dst, Address src)
void EmitAutoVarDecl(const VarDecl &D)
EmitAutoVarDecl - Emit an auto variable declaration.
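A simplified sketch of the three-phase structure behind this entry point, written as it would appear inside CodeGenFunction (allocate storage, emit the initializer, register cleanups):
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);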
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
static bool hasScalarEvaluationKind(QualType T)
const BlockByrefInfo & getBlockByrefInfo(const VarDecl *var)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
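A sketch of querying a VLA's size (VAT is an assumed const VariableArrayType* whose size expressions were captured earlier via EmitVariablyModifiedType; the member names follow the VlaSizePair struct):
  auto VlaSize = CGF.getVLASize(VAT);
  llvm::Value *NumElts = VlaSize.NumElts; // runtime element count
  QualType EltTy = VlaSize.Type;          // non-variably-sized element type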
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitExtendGCLifetime(llvm::Value *object)
EmitExtendGCLifetime - Given a pointer to an Objective-C object, make sure it survives garbage collec...
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
void EmitVariablyModifiedType(QualType Ty)
EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
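A minimal control-flow sketch combining createBasicBlock and EmitBlock (Cond is an assumed i1 llvm::Value*; block names are illustrative):
  llvm::BasicBlock *InitBB = CGF.createBasicBlock("init");
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("init.end");
  CGF.Builder.CreateCondBr(Cond, InitBB, ContBB);
  CGF.EmitBlock(InitBB);
  // ... conditional work goes here ...
  CGF.EmitBlock(ContBB);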
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
void emitByrefStructureInit(const AutoVarEmission &emission)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
@ TCK_NonnullAssign
Checking the value assigned to a _Nonnull pointer. Must not be null.
llvm::Value * EmitARCStoreStrongCall(Address addr, llvm::Value *value, bool resultIgnored)
llvm::Type * ConvertTypeForMem(QualType T)
llvm::Value * EmitARCUnsafeUnretainedScalarExpr(const Expr *expr)
void EmitAutoVarInit(const AutoVarEmission &emission)
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
DominatingValue< T >::saved_type saveValueInCond(T value)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
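A sketch of materializing a scalar into a fresh temporary (Ty is an assumed QualType, Val an assumed llvm::Value* of the matching IR type):
  RawAddress Temp = CGF.CreateMemTemp(Ty, "scalar.tmp");
  CGF.EmitStoreOfScalar(Val, Temp, /*Volatile=*/false, Ty);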
void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
void EmitAtomicInit(Expr *E, LValue lvalue)
const TargetInfo & getTarget() const
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit)
Emit code in this function to perform a guarded variable initialization.
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
void initFullExprCleanupWithFlag(RawAddress ActiveFlag)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
void EmitARCCopyWeak(Address dst, Address src)
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum, llvm::Value *ptr)
void defaultInitNonTrivialCStructVar(LValue Dst)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
emitBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitDeclRefLValue(const DeclRefExpr *E)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
llvm::Value * EmitARCRetainAutoreleaseScalarExpr(const Expr *expr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
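A sketch of registering destruction for a local (Ty and Addr are assumed; isDestructedType() yields DK_none when no destruction is needed, so the push is skipped):
  if (QualType::DestructionKind DtorKind = Ty.isDestructedType())
    CGF.pushDestroy(DtorKind, Addr, Ty);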
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
void EmitAutoVarCleanups(const AutoVarEmission &emission)
llvm::GlobalVariable * AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV)
AddInitializerToStaticVarDecl - Add the initializer for 'D' to the global variable that has already b...
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
llvm::Type * ConvertType(QualType T)
void EmitARCInitWeak(Address addr, llvm::Value *value)
static Destroyer destroyARCStrongPrecise
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
void pushStackRestore(CleanupKind kind, Address SPMem)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CGFunctionInfo * CurFnInfo
void pushKmpcAllocFree(CleanupKind Kind, std::pair< llvm::Value *, llvm::Value * > AddrSizePair)
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static Destroyer destroyARCStrongImprecise
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo)
Emits the alloca and debug information for the size expressions for each dimension of an array.
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool hasLabelBeenSeenInCurrentScope() const
Return true if a label was seen in the current scope.
This class organizes the cross-function state that is used while generating LLVM code.
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD)
void setGVProperties(llvm::GlobalValue *GV, GlobalDecl GD) const
Set visibility, dllimport/dllexport and dso_local.
llvm::Module & getModule() const
void setStaticLocalDeclAddress(const VarDecl *D, llvm::Constant *C)
llvm::Function * getLLVMLifetimeStartFn()
Lazily declare the @llvm.lifetime.start intrinsic.
Definition: CGDecl.cpp:2510
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1105
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get a target-specific null pointer.
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGOpenCLRuntime & getOpenCLRuntime()
Return a reference to the configured OpenCL runtime.
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
void EmitOMPAllocateDecl(const OMPAllocateDecl *D)
Emit code for the allocate directive.
Definition: CGDecl.cpp:2767
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
void addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.compiler.used metadata.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:245
llvm::Constant * GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition=NotForDefinition)
void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV)
Add global annotations that are set on D, for the global GV.
void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const
Set the TLS mode for the given LLVM GlobalValue for the thread-local variable declaration D.
ASTContext & getContext() const
void EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF=nullptr)
Emit code for the declare mapper construct.
Definition: CGDecl.cpp:2755
llvm::Function * getLLVMLifetimeEndFn()
Lazily declare the @llvm.lifetime.end intrinsic.
Definition: CGDecl.cpp:2519
void EmitOMPRequiresDecl(const OMPRequiresDecl *D)
Emit code for the requires directive.
Definition: CGDecl.cpp:2763
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
std::optional< CharUnits > getOMPAllocateAlignment(const VarDecl *VD)
Return the alignment specified in an allocate directive, if present.
Definition: CGDecl.cpp:2822
llvm::LLVMContext & getLLVMContext()
llvm::GlobalValue * GetGlobalValue(StringRef Ref)
void EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D, CodeGenFunction *CGF=nullptr)
Emit code for the declare reduction construct.
Definition: CGDecl.cpp:2748
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
LangAS GetGlobalVarAddressSpace(const VarDecl *D)
Return the AST address space of the underlying global variable for D, as determined by its declaratio...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * convertTypeForLoadStore(QualType T, llvm::Type *LLVMTy=nullptr)
Given that T is a scalar type, return the IR type that should be used for load and store operations.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
bool typeRequiresSplitIntoByteArray(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
Check whether the given type needs to be laid out in memory using an opaque byte-array type because i...
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
Definition: CGCall.cpp:462
llvm::Constant * tryEmitAbstractForInitializer(const VarDecl &D)
Try to emit the initializer of the given declaration as an abstract constant.
A cleanup scope which generates the cleanup blocks lazily.
Definition: CGCleanup.h:243
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:141
ConditionalCleanup stores the saved form of its parameters, then restores them and performs the clean...
Definition: EHScopeStack.h:203
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
iterator begin() const
Returns an iterator pointing to the innermost EH scope.
Definition: CGCleanup.h:615
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition: CGValue.h:361
QualType getType() const
Definition: CGValue.h:291
void setNonGC(bool Value)
Definition: CGValue.h:304
void setAddress(Address address)
Definition: CGValue.h:363
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:293
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
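A sketch of wrapping a raw scalar as an RValue and storing it through an l-value (V, Addr, and Ty are assumed):
  LValue LV = CGF.MakeAddrLValue(Addr, Ty);
  CGF.EmitStoreThroughLValue(RValue::get(V), LV, /*isInit=*/true);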
An abstract representation of an aligned address.
Definition: Address.h:42
llvm::Value * getPointer() const
Definition: Address.h:66
static RawAddress invalid()
Definition: Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
void reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D, bool IsDynInit=false)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
Definition: TargetInfo.h:76
bool IsBypassed(const VarDecl *D) const
Returns true if the variable declaration was bypassed by any goto or switch statement.
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1628
body_range body()
Definition: Stmt.h:1691
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Definition: DeclBase.h:1435
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
const DeclContext * getParentFunctionOrMethod(bool LexicalParent=false) const
If this decl is defined inside a function/method/block it returns the corresponding DeclContext,...
Definition: DeclBase.cpp:314
T * getAttr() const
Definition: DeclBase.h:576
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
Definition: DeclBase.cpp:1243
SourceLocation getLocation() const
Definition: DeclBase.h:442
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:549
DeclContext * getDeclContext()
Definition: DeclBase.h:451
bool hasAttr() const
Definition: DeclBase.h:580
Kind getKind() const
Definition: DeclBase.h:445
This represents one expression.
Definition: Expr.h:110
bool isXValue() const
Definition: Expr.h:279
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:3095
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3086
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:277
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a function declaration or definition.
Definition: Decl.h:1935
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
One of these records is kept for each identifier that is lexed.
IdentifierInfo & getOwn(StringRef Name)
Gets an IdentifierInfo for the given name without consulting external sources.
This represents '#pragma omp allocate ...' directive.
Definition: DeclOpenMP.h:474
This represents '#pragma omp declare mapper ...' directive.
Definition: DeclOpenMP.h:287
This represents '#pragma omp declare reduction ...' directive.
Definition: DeclOpenMP.h:177
This represents '#pragma omp requires...' directive.
Definition: DeclOpenMP.h:417
A (possibly-)qualified type.
Definition: Type.h:929
@ DK_cxx_destructor
Definition: Type.h:1521
@ DK_nontrivial_c_struct
Definition: Type.h:1524
@ DK_objc_weak_lifetime
Definition: Type.h:1523
@ DK_objc_strong_lifetime
Definition: Type.h:1522
@ PDIK_Struct
The type is a struct containing a field whose type is not PCK_Trivial.
Definition: Type.h:1467
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:8057
bool isConstant(const ASTContext &Ctx) const
Definition: Type.h:1089
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7971
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1433
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:8134
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:8025
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1028
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition: Type.cpp:2641
The collection of all-type qualifiers we support.
Definition: Type.h:324
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:354
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:347
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:343
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:357
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:360
bool hasConst() const
Definition: Type.h:450
ObjCLifetime getObjCLifetime() const
Definition: Type.h:538
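A hedged sketch of dispatching on the ARC ownership qualifier when initializing a scalar; the per-case handling is illustrative only (Ty, Addr, and Val are assumed):
  switch (Ty.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Autoreleasing:
    CGF.EmitStoreOfScalar(Val, Addr, /*Volatile=*/false, Ty);
    break;
  case Qualifiers::OCL_Strong:
    Val = CGF.EmitARCRetainNonBlock(Val);
    CGF.EmitStoreOfScalar(Val, Addr, /*Volatile=*/false, Ty);
    break;
  case Qualifiers::OCL_Weak:
    CGF.EmitARCInitWeak(Addr, Val);
    break;
  }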
bool isParamDestroyedInCallee() const
Definition: Decl.h:4290
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6072
RecordDecl * getDecl() const
Definition: Type.h:6082
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
static const uint64_t MaximumAlignment
Definition: Sema.h:839
Encodes a location in the source.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
Stmt - This represents one statement.
Definition: Stmt.h:84
child_range children()
Definition: Stmt.cpp:294
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
Definition: TargetCXXABI.h:136
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
Definition: TargetInfo.h:1333
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
Definition: Type.cpp:2386
bool isArrayType() const
Definition: Type.h:8258
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8800
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2724
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:8731
bool isRecordType() const
Definition: Type.h:8286
std::optional< NullabilityKind > getNullability() const
Determine the nullability of the given type.
Definition: Type.cpp:4763
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:671
QualType getType() const
Definition: Decl.h:682
Represents a variable declaration or definition.
Definition: Decl.h:882
static VarDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass S)
Definition: Decl.cpp:2140
bool hasGlobalStorage() const
Returns true for all variables that do not have local storage.
Definition: Decl.h:1177
const Expr * getInit() const
Definition: Decl.h:1319
bool isLocalVarDecl() const
Returns true for local variable declarations other than parameters.
Definition: Decl.h:1204
Defines the clang::TargetInfo interface.
@ BLOCK_FIELD_IS_BYREF
Definition: CGBlocks.h:92
@ BLOCK_FIELD_IS_WEAK
Definition: CGBlocks.h:94
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
Definition: PatternInit.cpp:15
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:84
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
ARCPreciseLifetime_t
Does an ARC strong l-value have precise lifetime?
Definition: CGValue.h:135
@ ARCPreciseLifetime
Definition: CGValue.h:136
@ ARCImpreciseLifetime
Definition: CGValue.h:136
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:3869
bool Null(InterpState &S, CodePtr OpPC, uint64_t Value, const Descriptor *Desc)
Definition: Interp.h:2424
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2408
The JSON file list parser is used to communicate input to InstallAPI.
@ Ctor_Base
Base object ctor.
Definition: ABI.h:26
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ NonNull
Values of this type can never be null.
@ SC_Auto
Definition: Specifiers.h:256
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
Definition: Linkage.h:24
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:329
@ Dtor_Base
Base object dtor.
Definition: ABI.h:36
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
@ VK_LValue
An l-value expression is a reference to an object with independent storage.
Definition: Specifiers.h:139
const FunctionProtoType * T
@ ThreadPrivateVar
Parameter for Thread private variable.
float __ovld __cnfn length(float)
Return the length of vector p, i.e., sqrt(p.x^2 + p.y^2 + ...)
static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable VD.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::PointerType * AllocaInt8PtrTy
A metaprogramming class for ensuring that a value will dominate an arbitrary position in a function.
Definition: EHScopeStack.h:65
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159