// clang 20.0.0git — CGExprAgg.cpp (captured from a Doxygen source listing;
// page chrome such as "Go to the documentation of this file." removed).
1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGObjCRuntime.h"
15#include "CGRecordLayout.h"
16#include "CodeGenFunction.h"
17#include "CodeGenModule.h"
18#include "ConstantEmitter.h"
19#include "EHScopeStack.h"
20#include "TargetInfo.h"
22#include "clang/AST/Attr.h"
23#include "clang/AST/DeclCXX.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/Function.h"
28#include "llvm/IR/GlobalVariable.h"
29#include "llvm/IR/Instruction.h"
30#include "llvm/IR/IntrinsicInst.h"
31#include "llvm/IR/Intrinsics.h"
32using namespace clang;
33using namespace CodeGen;
34
35//===----------------------------------------------------------------------===//
36// Aggregate Expression Emitter
37//===----------------------------------------------------------------------===//
38
39namespace llvm {
// External command-line flag (llvm::cl::opt) defined elsewhere in LLVM.
// NOTE(review): semantics assumed from the name (single-byte coverage
// counters) — confirm against the defining translation unit before use.
40extern cl::opt<bool> EnableSingleByteCoverage;
41} // namespace llvm
42
43namespace {
 /// Visitor that emits an aggregate-typed expression into a destination slot
 /// (Dest). Most l-value cases reduce to EmitAggLoadOfLValue; most r-value
 /// cases construct directly into Dest, creating a temporary via
 /// EnsureSlot/EnsureDest when Dest is ignored.
 /// NOTE(review): this listing was scraped from a Doxygen page; several
 /// original source lines are missing (gaps in the embedded numbering) and
 /// are flagged below — restore them from the upstream file before building.
44class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
45 CodeGenFunction &CGF;
46 CGBuilderTy &Builder;
 // Destination slot for the aggregate result; may be "ignored".
47 AggValueSlot Dest;
48 bool IsResultUnused;
49
 // Return Dest, or a fresh temporary slot for T when Dest is ignored.
50 AggValueSlot EnsureSlot(QualType T) {
51 if (!Dest.isIgnored()) return Dest;
52 return CGF.CreateAggTemp(T, "agg.tmp.ensured");
53 }
 // Like EnsureSlot, but installs the temporary into Dest itself.
54 void EnsureDest(QualType T) {
55 if (!Dest.isIgnored()) return;
56 Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
57 }
58
59 // Calls `Fn` with a valid return value slot, potentially creating a temporary
60 // to do so. If a temporary is created, an appropriate copy into `Dest` will
61 // be emitted, as will lifetime markers.
62 //
63 // The given function should take a ReturnValueSlot, and return an RValue that
64 // points to said slot.
65 void withReturnValueSlot(const Expr *E,
66 llvm::function_ref<RValue(ReturnValueSlot)> Fn);
67
 // Zero-fills the padding bytes in [PaddingStart, PaddingEnd) that precede
 // NextField; PaddingStart is advanced by reference.
68 void DoZeroInitPadding(uint64_t &PaddingStart, uint64_t PaddingEnd,
69 const FieldDecl *NextField);
70
71public:
72 AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
73 : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
74 IsResultUnused(IsResultUnused) { }
75
76 //===--------------------------------------------------------------------===//
77 // Utilities
78 //===--------------------------------------------------------------------===//
79
80 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
81 /// represents a value lvalue, this method emits the address of the lvalue,
82 /// then loads the result into DestPtr.
83 void EmitAggLoadOfLValue(const Expr *E);
84
85 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
86 /// SrcIsRValue is true if source comes from an RValue.
87 void EmitFinalDestCopy(QualType type, const LValue &src,
88 CodeGenFunction::ExprValueKind SrcValueKind =
89 CodeGenFunction::EVK_NonRValue);
90 void EmitFinalDestCopy(QualType type, RValue src);
91 void EmitCopy(QualType type, const AggValueSlot &dest,
92 const AggValueSlot &src);
93
94 void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
95 Expr *ExprToVisit, ArrayRef<Expr *> Args,
96 Expr *ArrayFiller);
97
 // NOTE(review): original lines 98 and 100-101 were dropped by the listing;
 // this fragment is presumably the body of a GC-barrier helper (e.g.
 // `needsGC(QualType T)`) — confirm against the upstream file.
99 if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
102 }
103
104 bool TypeRequiresGCollection(QualType T);
105
106 //===--------------------------------------------------------------------===//
107 // Visitor Methods
108 //===--------------------------------------------------------------------===//
109
110 void Visit(Expr *E) {
111 ApplyDebugLocation DL(CGF, E);
 // NOTE(review): original line 112 (presumably the StmtVisitor dispatch
 // call) was dropped by the listing.
113 }
114
115 void VisitStmt(Stmt *S) {
116 CGF.ErrorUnsupported(S, "aggregate expression");
117 }
118 void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
119 void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
120 Visit(GE->getResultExpr());
121 }
122 void VisitCoawaitExpr(CoawaitExpr *E) {
123 CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
124 }
125 void VisitCoyieldExpr(CoyieldExpr *E) {
126 CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
127 }
128 void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
129 void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
130 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
131 return Visit(E->getReplacement());
132 }
133
 // Try to emit the constant expression as a direct store into Dest; fall
 // back to visiting the sub-expression when constant emission fails.
134 void VisitConstantExpr(ConstantExpr *E) {
135 EnsureDest(E->getType());
136
137 if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
 // NOTE(review): original lines 138 and 143 (the store-call head and its
 // trailing argument/paren) were dropped by the listing.
139 Result, Dest.getAddress(),
140 llvm::TypeSize::getFixed(
141 Dest.getPreferredSize(CGF.getContext(), E->getType())
142 .getQuantity()),
144 return;
145 }
146 return Visit(E->getSubExpr());
147 }
148
149 // l-values.
150 void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
151 void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
152 void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
153 void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
154 void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
155 void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
156 EmitAggLoadOfLValue(E);
157 }
158 void VisitPredefinedExpr(const PredefinedExpr *E) {
159 EmitAggLoadOfLValue(E);
160 }
161
162 // Operators.
163 void VisitCastExpr(CastExpr *E);
164 void VisitCallExpr(const CallExpr *E);
165 void VisitStmtExpr(const StmtExpr *E);
166 void VisitBinaryOperator(const BinaryOperator *BO);
167 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
168 void VisitBinAssign(const BinaryOperator *E);
169 void VisitBinComma(const BinaryOperator *E);
170 void VisitBinCmp(const BinaryOperator *E);
171 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
172 Visit(E->getSemanticForm());
173 }
174
175 void VisitObjCMessageExpr(ObjCMessageExpr *E);
176 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
177 EmitAggLoadOfLValue(E);
178 }
179
180 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
181 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
182 void VisitChooseExpr(const ChooseExpr *CE);
183 void VisitInitListExpr(InitListExpr *E);
184 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
185 FieldDecl *InitializedFieldInUnion,
186 Expr *ArrayFiller);
187 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
188 llvm::Value *outerBegin = nullptr);
189 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
190 void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
191 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
192 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
193 Visit(DAE->getExpr());
194 }
195 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
196 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
197 Visit(DIE->getExpr());
198 }
199 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
200 void VisitCXXConstructExpr(const CXXConstructExpr *E);
201 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
202 void VisitLambdaExpr(LambdaExpr *E);
203 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
204 void VisitExprWithCleanups(ExprWithCleanups *E);
205 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
206 void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
207 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
208 void VisitOpaqueValueExpr(OpaqueValueExpr *E);
209
210 void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
211 if (E->isGLValue()) {
 // NOTE(review): original line 212 (the computation of `LV`, presumably
 // via CGF.EmitPseudoObjectLValue) was dropped by the listing.
213 return EmitFinalDestCopy(E->getType(), LV);
214 }
215
216 AggValueSlot Slot = EnsureSlot(E->getType());
217 bool NeedsDestruction =
218 !Slot.isExternallyDestructed() &&
 // NOTE(review): original lines 219, 221 and 224 were dropped by the
 // listing (the destructed-type test and the destroy-cleanup calls).
220 if (NeedsDestruction)
222 CGF.EmitPseudoObjectRValue(E, Slot);
223 if (NeedsDestruction)
225 E->getType());
226 }
227
228 void VisitVAArgExpr(VAArgExpr *E);
229 void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
230 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
231 Expr *ArrayFiller);
232
233 void EmitInitializationToLValue(Expr *E, LValue Address);
234 void EmitNullInitializationToLValue(LValue Address);
235 // case Expr::ChooseExprClass:
236 void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
237 void VisitAtomicExpr(AtomicExpr *E) {
238 RValue Res = CGF.EmitAtomicExpr(E);
239 EmitFinalDestCopy(E->getType(), Res);
240 }
241 void VisitPackIndexingExpr(PackIndexingExpr *E) {
242 Visit(E->getSelectedExpr());
243 }
244};
245} // end anonymous namespace.
246
247//===----------------------------------------------------------------------===//
248// Utilities
249//===----------------------------------------------------------------------===//
250
251/// EmitAggLoadOfLValue - Given an expression with aggregate type that
252/// represents a value lvalue, this method emits the address of the lvalue,
253/// then loads the result into DestPtr.
254void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
255 LValue LV = CGF.EmitLValue(E);
256
257 // If the type of the l-value is atomic, then do an atomic load.
 // NOTE(review): original line 258 (the `if` condition guarding the atomic
 // load, presumably testing LV's type for atomicity) was dropped by the
 // listing — restore from the upstream file.
259 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
260 return;
261 }
262
 // Non-atomic case: plain final copy of the l-value into Dest.
263 EmitFinalDestCopy(E->getType(), LV);
264}
265
266/// True if the given aggregate type requires special GC API calls.
267bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
268 // Only record types have members that might require garbage collection.
269 const RecordType *RecordTy = T->getAs<RecordType>();
270 if (!RecordTy) return false;
271
272 // Don't mess with non-trivial C++ types.
273 RecordDecl *Record = RecordTy->getDecl();
274 if (isa<CXXRecordDecl>(Record) &&
275 (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
276 !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
277 return false;
278
279 // Check whether the type has an object member.
280 return Record->hasObjectMember();
281}
282
283void AggExprEmitter::withReturnValueSlot(
284 const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
285 QualType RetTy = E->getType();
286 bool RequiresDestruction =
287 !Dest.isExternallyDestructed() &&
 // NOTE(review): original line 288 (the second conjunct — presumably a test
 // that RetTy's destruction kind is a non-trivial C struct) was dropped by
 // the listing — restore from the upstream file.
289
290 // If it makes no observable difference, save a memcpy + temporary.
291 //
292 // We need to always provide our own temporary if destruction is required.
293 // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
294 // its lifetime before we have the chance to emit a proper destructor call.
295 bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
296 (RequiresDestruction && Dest.isIgnored());
297
298 Address RetAddr = Address::invalid();
299 RawAddress RetAllocaAddr = RawAddress::invalid();
300
301 EHScopeStack::stable_iterator LifetimeEndBlock;
302 llvm::Value *LifetimeSizePtr = nullptr;
303 llvm::IntrinsicInst *LifetimeStartInst = nullptr;
304 if (!UseTemp) {
 // The call can return straight into the final destination.
305 RetAddr = Dest.getAddress();
306 } else {
 // Create a temporary with lifetime markers; a cleanup emits
 // lifetime.end on the exceptional path.
307 RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
308 llvm::TypeSize Size =
309 CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
310 LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
311 if (LifetimeSizePtr) {
312 LifetimeStartInst =
313 cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
314 assert(LifetimeStartInst->getIntrinsicID() ==
315 llvm::Intrinsic::lifetime_start &&
316 "Last insertion wasn't a lifetime.start?");
317
318 CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
319 NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
320 LifetimeEndBlock = CGF.EHStack.stable_begin();
321 }
322 }
323
324 RValue Src =
325 EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
326 Dest.isExternallyDestructed()));
327
328 if (!UseTemp)
329 return;
330
 // The call must not have returned directly into the destination.
331 assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
332 Src.getAggregatePointer(E->getType(), CGF));
333 EmitFinalDestCopy(E->getType(), Src);
334
335 if (!RequiresDestruction && LifetimeStartInst) {
336 // If there's no dtor to run, the copy was the last use of our temporary.
337 // Since we're not guaranteed to be in an ExprWithCleanups, clean up
338 // eagerly.
339 CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
340 CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
341 }
342}
343
344/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
345void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
346 assert(src.isAggregate() && "value must be aggregate value!");
347 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
348 EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
349}
350
351/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
352void AggExprEmitter::EmitFinalDestCopy(
353 QualType type, const LValue &src,
354 CodeGenFunction::ExprValueKind SrcValueKind) {
355 // If Dest is ignored, then we're evaluating an aggregate expression
356 // in a context that doesn't care about the result. Note that loads
357 // from volatile l-values force the existence of a non-ignored
358 // destination.
359 if (Dest.isIgnored())
360 return;
361
362 // Copy non-trivial C structs here.
363 LValue DstLV = CGF.MakeAddrLValue(
364 Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
365
 // Non-trivial C structs go through the special move/copy entry points;
 // whether it is a move or a copy depends on SrcValueKind, and whether it is
 // construction or assignment depends on potential aliasing of Dest.
366 if (SrcValueKind == CodeGenFunction::EVK_RValue) {
367 if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
368 if (Dest.isPotentiallyAliased())
369 CGF.callCStructMoveAssignmentOperator(DstLV, src);
370 else
371 CGF.callCStructMoveConstructor(DstLV, src);
372 return;
373 }
374 } else {
375 if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
376 if (Dest.isPotentiallyAliased())
377 CGF.callCStructCopyAssignmentOperator(DstLV, src);
378 else
379 CGF.callCStructCopyConstructor(DstLV, src);
380 return;
381 }
382 }
383
 // NOTE(review): original lines 384-386 (presumably the construction of
 // `srcAgg` as an AggValueSlot wrapping `src`) were dropped by the listing —
 // restore from the upstream file.
387 EmitCopy(type, Dest, srcAgg);
388}
389
390/// Perform a copy from the source into the destination.
391///
392/// \param type - the type of the aggregate being copied; qualifiers are
393/// ignored
394void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
395 const AggValueSlot &src) {
 // Destinations needing GC barriers use a size-based runtime copy.
396 if (dest.requiresGCollection()) {
397 CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
398 llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
 // NOTE(review): original line 399 (the head of the runtime-copy call taking
 // these dest/src/size arguments) was dropped by the listing — restore from
 // the upstream file.
400 dest.getAddress(),
401 src.getAddress(),
402 size);
403 return;
404 }
405
406 // If the result of the assignment is used, copy the LHS there also.
407 // It's volatile if either side is. Use the minimum alignment of
408 // the two sides.
409 LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
410 LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
411 CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
412 dest.isVolatile() || src.isVolatile());
413}
414
415/// Emit the initializer for a std::initializer_list initialized with a
416/// real initializer list.
417void
418AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
419 // Emit an array containing the elements. The array is externally destructed
420 // if the std::initializer_list object is.
421 ASTContext &Ctx = CGF.getContext();
422 LValue Array = CGF.EmitLValue(E->getSubExpr());
423 assert(Array.isSimple() && "initializer_list array not a simple lvalue");
424 Address ArrayPtr = Array.getAddress();
425
 // NOTE(review): original line 426 (presumably the declaration
 // `const ConstantArrayType *ArrayType =` whose initializer follows) was
 // dropped by the listing — restore from the upstream file.
427 Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
428 assert(ArrayType && "std::initializer_list constructed from non-array");
429
430 RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
431 RecordDecl::field_iterator Field = Record->field_begin();
 // NOTE(review): original line 434 (the second argument of hasSameType below,
 // presumably the array's element type) was dropped by the listing.
432 assert(Field != Record->field_end() &&
433 Ctx.hasSameType(Field->getType()->getPointeeType(),
435 "Expected std::initializer_list first field to be const E *");
436
437 // Start pointer.
438 AggValueSlot Dest = EnsureSlot(E->getType());
439 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
440 LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
441 llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
442 CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
443 ++Field;
444 assert(Field != Record->field_end() &&
445 "Expected std::initializer_list to have two fields");
446
 // The second field is either a length (size_t) or an end pointer.
447 llvm::Value *Size = Builder.getInt(ArrayType->getSize());
448 LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
449 if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
450 // Length.
451 CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
452
453 } else {
454 // End pointer.
 // NOTE(review): original line 457 (the second argument of hasSameType below)
 // was dropped by the listing.
455 assert(Field->getType()->isPointerType() &&
456 Ctx.hasSameType(Field->getType()->getPointeeType(),
458 "Expected std::initializer_list second field to be const E *");
459 llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
460 llvm::Value *IdxEnd[] = { Zero, Size };
461 llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
462 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
463 "arrayend");
464 CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
465 }
466
467 assert(++Field == Record->field_end() &&
468 "Expected std::initializer_list to only have two fields");
469}
470
471/// Determine if E is a trivial array filler, that is, one that is
472/// equivalent to zero-initialization.
473static bool isTrivialFiller(Expr *E) {
474 if (!E)
475 return true;
476
477 if (isa<ImplicitValueInitExpr>(E))
478 return true;
479
480 if (auto *ILE = dyn_cast<InitListExpr>(E)) {
481 if (ILE->getNumInits())
482 return false;
483 return isTrivialFiller(ILE->getArrayFiller());
484 }
485
486 if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
487 return Cons->getConstructor()->isDefaultConstructor() &&
488 Cons->getConstructor()->isTrivial();
489
490 // FIXME: Are there other cases where we can avoid emitting an initializer?
491 return false;
492}
493
494/// Emit initialization of an array from an initializer list. ExprToVisit must
495/// be either an InitListEpxr a CXXParenInitListExpr.
496void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
497 QualType ArrayQTy, Expr *ExprToVisit,
498 ArrayRef<Expr *> Args, Expr *ArrayFiller) {
499 uint64_t NumInitElements = Args.size();
500
 // #embed expressions expand to multiple elements; account for them, clamped
 // to the array size.
501 uint64_t NumArrayElements = AType->getNumElements();
502 for (const auto *Init : Args) {
503 if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
504 NumInitElements += Embed->getDataElementCount() - 1;
505 if (NumInitElements > NumArrayElements) {
506 NumInitElements = NumArrayElements;
507 break;
508 }
509 }
510 }
511
512 assert(NumInitElements <= NumArrayElements);
513
514 QualType elementType =
515 CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
516 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
517 CharUnits elementAlign =
518 DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
519 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
520
521 // Consider initializing the array by copying from a global. For this to be
522 // more efficient than per-element initialization, the size of the elements
523 // with explicit initializers should be large enough.
524 if (NumInitElements * elementSize.getQuantity() > 16 &&
525 elementType.isTriviallyCopyableType(CGF.getContext())) {
526 CodeGen::CodeGenModule &CGM = CGF.CGM;
 // NOTE(review): original line 527 (presumably `ConstantEmitter Emitter(CGM);`,
 // which the uses of `Emitter` below require) was dropped by the listing.
528 QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
529 CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
 // NOTE(review): original line 530 (the address-space argument closing this
 // call) was dropped by the listing.
531 LangAS AS = GVArrayQTy.getAddressSpace();
532 if (llvm::Constant *C =
533 Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
534 auto GV = new llvm::GlobalVariable(
535 CGM.getModule(), C->getType(),
536 /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
537 "constinit",
538 /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
 // NOTE(review): original line 539 (the trailing address-space argument of
 // this constructor call) was dropped by the listing.
540 Emitter.finalize(GV);
541 CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
542 GV->setAlignment(Align.getAsAlign());
543 Address GVAddr(GV, GV->getValueType(), Align);
544 EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
545 return;
546 }
547 }
548
549 // Exception safety requires us to destroy all the
550 // already-constructed members if an initializer throws.
551 // For that, we'll need an EH cleanup.
552 QualType::DestructionKind dtorKind = elementType.isDestructedType();
553 Address endOfInit = Address::invalid();
554 CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
555
556 llvm::Value *begin = DestPtr.emitRawPointer(CGF);
557 if (dtorKind) {
558 CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
559 // In principle we could tell the cleanup where we are more
560 // directly, but the control flow can get so varied here that it
561 // would actually be quite complex. Therefore we go through an
562 // alloca.
563 llvm::Instruction *dominatingIP =
564 Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
565 endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
566 "arrayinit.endOfInit");
567 Builder.CreateStore(begin, endOfInit);
568 CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
569 elementAlign,
570 CGF.getDestroyer(dtorKind));
571 cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
572 .AddAuxAllocas(allocaTracker.Take());
573
 // NOTE(review): original line 574 (the head of the call receiving this
 // cleanup/IP pair, presumably pushing onto a deferred-deactivation stack)
 // was dropped by the listing.
575 {CGF.EHStack.stable_begin(), dominatingIP});
576 }
577
578 llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
579
 // Emit one explicit initializer into element `ArrayIndex`, keeping the EH
 // cleanup's end-of-init marker up to date.
580 auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
581 llvm::Value *element = begin;
582 if (ArrayIndex > 0) {
583 element = Builder.CreateInBoundsGEP(
584 llvmElementType, begin,
585 llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");
586
587 // Tell the cleanup that it needs to destroy up to this
588 // element. TODO: some of these stores can be trivially
589 // observed to be unnecessary.
590 if (endOfInit.isValid())
591 Builder.CreateStore(element, endOfInit);
592 }
593
594 LValue elementLV = CGF.MakeAddrLValue(
595 Address(element, llvmElementType, elementAlign), elementType);
596 EmitInitializationToLValue(Init, elementLV);
597 return true;
598 };
599
600 unsigned ArrayIndex = 0;
601 // Emit the explicit initializers.
602 for (uint64_t i = 0; i != NumInitElements; ++i) {
603 if (ArrayIndex >= NumInitElements)
604 break;
 // #embed expands to several data elements; Emit is invoked per element and
 // advances ArrayIndex internally via doForEachDataElement.
605 if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
606 EmbedS->doForEachDataElement(Emit, ArrayIndex);
607 } else {
608 Emit(Args[i], ArrayIndex);
609 ArrayIndex++;
610 }
611 }
612
613 // Check whether there's a non-trivial array-fill expression.
614 bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
615
616 // Any remaining elements need to be zero-initialized, possibly
617 // using the filler expression. We can skip this if the we're
618 // emitting to zeroed memory.
619 if (NumInitElements != NumArrayElements &&
620 !(Dest.isZeroed() && hasTrivialFiller &&
621 CGF.getTypes().isZeroInitializable(elementType))) {
622
623 // Use an actual loop. This is basically
624 // do { *array++ = filler; } while (array != end);
625
626 // Advance to the start of the rest of the array.
627 llvm::Value *element = begin;
628 if (NumInitElements) {
629 element = Builder.CreateInBoundsGEP(
630 llvmElementType, element,
631 llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
632 "arrayinit.start");
633 if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
634 }
635
636 // Compute the end of the array.
637 llvm::Value *end = Builder.CreateInBoundsGEP(
638 llvmElementType, begin,
639 llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
640
641 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
642 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
643
644 // Jump into the body.
645 CGF.EmitBlock(bodyBB);
646 llvm::PHINode *currentElement =
647 Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
648 currentElement->addIncoming(element, entryBB);
649
650 // Emit the actual filler expression.
651 {
652 // C++1z [class.temporary]p5:
653 // when a default constructor is called to initialize an element of
654 // an array with no corresponding initializer [...] the destruction of
655 // every temporary created in a default argument is sequenced before
656 // the construction of the next array element, if any
657 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
658 LValue elementLV = CGF.MakeAddrLValue(
659 Address(currentElement, llvmElementType, elementAlign), elementType);
660 if (ArrayFiller)
661 EmitInitializationToLValue(ArrayFiller, elementLV);
662 else
663 EmitNullInitializationToLValue(elementLV);
664 }
665
666 // Move on to the next element.
667 llvm::Value *nextElement = Builder.CreateInBoundsGEP(
668 llvmElementType, currentElement, one, "arrayinit.next");
669
670 // Tell the EH cleanup that we finished with the last element.
671 if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
672
673 // Leave the loop if we're done.
674 llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
675 "arrayinit.done");
676 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
677 Builder.CreateCondBr(done, endBB, bodyBB);
678 currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
679
680 CGF.EmitBlock(endBB);
681 }
682}
683
684//===----------------------------------------------------------------------===//
685// Visitor Methods
686//===----------------------------------------------------------------------===//
687
688void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
689 Visit(E->getSubExpr());
690}
691
692void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
693 // If this is a unique OVE, just visit its source expression.
694 if (e->isUnique())
695 Visit(e->getSourceExpr());
696 else
697 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
698}
699
700void
701AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
702 if (Dest.isPotentiallyAliased() &&
703 E->getType().isPODType(CGF.getContext())) {
704 // For a POD type, just emit a load of the lvalue + a copy, because our
705 // compound literal might alias the destination.
706 EmitAggLoadOfLValue(E);
707 return;
708 }
709
710 AggValueSlot Slot = EnsureSlot(E->getType());
711
712 // Block-scope compound literals are destroyed at the end of the enclosing
713 // scope in C.
714 bool Destruct =
715 !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
716 if (Destruct)
 // NOTE(review): original line 717 (the statement guarded by this `if`,
 // presumably marking the slot externally destructed) was dropped by the
 // listing — restore from the upstream file.
718
719 CGF.EmitAggExpr(E->getInitializer(), Slot);
720
721 if (Destruct)
 // NOTE(review): original lines 722-723 (the destruction-kind computation
 // and the head of the destroy/cleanup call receiving the arguments below)
 // were dropped by the listing.
724 CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
725 CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
726}
727
728/// Attempt to look through various unimportant expressions to find a
729/// cast of the given kind.
730static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
731 op = op->IgnoreParenNoopCasts(ctx);
732 if (auto castE = dyn_cast<CastExpr>(op)) {
733 if (castE->getCastKind() == kind)
734 return castE->getSubExpr();
735 }
736 return nullptr;
737}
738
739void AggExprEmitter::VisitCastExpr(CastExpr *E) {
740 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
741 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
742 switch (E->getCastKind()) {
743 case CK_Dynamic: {
744 // FIXME: Can this actually happen? We have no test coverage for it.
745 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
746 LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
747 CodeGenFunction::TCK_Load);
748 // FIXME: Do we also need to handle property references here?
749 if (LV.isSimple())
750 CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
751 else
752 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
753
754 if (!Dest.isIgnored())
755 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
756 break;
757 }
758
759 case CK_ToUnion: {
760 // Evaluate even if the destination is ignored.
761 if (Dest.isIgnored()) {
762 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
763 /*ignoreResult=*/true);
764 break;
765 }
766
767 // GCC union extension
768 QualType Ty = E->getSubExpr()->getType();
769 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
770 EmitInitializationToLValue(E->getSubExpr(),
771 CGF.MakeAddrLValue(CastPtr, Ty));
772 break;
773 }
774
775 case CK_LValueToRValueBitCast: {
776 if (Dest.isIgnored()) {
777 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
778 /*ignoreResult=*/true);
779 break;
780 }
781
782 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
783 Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
784 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
785 llvm::Value *SizeVal = llvm::ConstantInt::get(
786 CGF.SizeTy,
788 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
789 break;
790 }
791
792 case CK_DerivedToBase:
793 case CK_BaseToDerived:
794 case CK_UncheckedDerivedToBase: {
795 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
796 "should have been unpacked before we got here");
797 }
798
799 case CK_NonAtomicToAtomic:
800 case CK_AtomicToNonAtomic: {
801 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
802
803 // Determine the atomic and value types.
804 QualType atomicType = E->getSubExpr()->getType();
805 QualType valueType = E->getType();
806 if (isToAtomic) std::swap(atomicType, valueType);
807
808 assert(atomicType->isAtomicType());
809 assert(CGF.getContext().hasSameUnqualifiedType(valueType,
810 atomicType->castAs<AtomicType>()->getValueType()));
811
812 // Just recurse normally if we're ignoring the result or the
813 // atomic type doesn't change representation.
814 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
815 return Visit(E->getSubExpr());
816 }
817
818 CastKind peepholeTarget =
819 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
820
821 // These two cases are reverses of each other; try to peephole them.
822 if (Expr *op =
823 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
824 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
825 E->getType()) &&
826 "peephole significantly changed types?");
827 return Visit(op);
828 }
829
830 // If we're converting an r-value of non-atomic type to an r-value
831 // of atomic type, just emit directly into the relevant sub-object.
832 if (isToAtomic) {
833 AggValueSlot valueDest = Dest;
834 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
835 // Zero-initialize. (Strictly speaking, we only need to initialize
836 // the padding at the end, but this is simpler.)
837 if (!Dest.isZeroed())
839
840 // Build a GEP to refer to the subobject.
841 Address valueAddr =
842 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
843 valueDest = AggValueSlot::forAddr(valueAddr,
844 valueDest.getQualifiers(),
845 valueDest.isExternallyDestructed(),
846 valueDest.requiresGCollection(),
847 valueDest.isPotentiallyAliased(),
850 }
851
852 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
853 return;
854 }
855
856 // Otherwise, we're converting an atomic type to a non-atomic type.
857 // Make an atomic temporary, emit into that, and then copy the value out.
858 AggValueSlot atomicSlot =
859 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
860 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
861
862 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
863 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
864 return EmitFinalDestCopy(valueType, rvalue);
865 }
866 case CK_AddressSpaceConversion:
867 return Visit(E->getSubExpr());
868
869 case CK_LValueToRValue:
870 // If we're loading from a volatile type, force the destination
871 // into existence.
872 if (E->getSubExpr()->getType().isVolatileQualified()) {
873 bool Destruct =
874 !Dest.isExternallyDestructed() &&
876 if (Destruct)
878 EnsureDest(E->getType());
879 Visit(E->getSubExpr());
880
881 if (Destruct)
883 E->getType());
884
885 return;
886 }
887
888 [[fallthrough]];
889
890 case CK_HLSLArrayRValue:
891 Visit(E->getSubExpr());
892 break;
893
894 case CK_NoOp:
895 case CK_UserDefinedConversion:
896 case CK_ConstructorConversion:
897 assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
898 E->getType()) &&
899 "Implicit cast types must be compatible");
900 Visit(E->getSubExpr());
901 break;
902
903 case CK_LValueBitCast:
904 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
905
906 case CK_Dependent:
907 case CK_BitCast:
908 case CK_ArrayToPointerDecay:
909 case CK_FunctionToPointerDecay:
910 case CK_NullToPointer:
911 case CK_NullToMemberPointer:
912 case CK_BaseToDerivedMemberPointer:
913 case CK_DerivedToBaseMemberPointer:
914 case CK_MemberPointerToBoolean:
915 case CK_ReinterpretMemberPointer:
916 case CK_IntegralToPointer:
917 case CK_PointerToIntegral:
918 case CK_PointerToBoolean:
919 case CK_ToVoid:
920 case CK_VectorSplat:
921 case CK_IntegralCast:
922 case CK_BooleanToSignedIntegral:
923 case CK_IntegralToBoolean:
924 case CK_IntegralToFloating:
925 case CK_FloatingToIntegral:
926 case CK_FloatingToBoolean:
927 case CK_FloatingCast:
928 case CK_CPointerToObjCPointerCast:
929 case CK_BlockPointerToObjCPointerCast:
930 case CK_AnyPointerToBlockPointerCast:
931 case CK_ObjCObjectLValueCast:
932 case CK_FloatingRealToComplex:
933 case CK_FloatingComplexToReal:
934 case CK_FloatingComplexToBoolean:
935 case CK_FloatingComplexCast:
936 case CK_FloatingComplexToIntegralComplex:
937 case CK_IntegralRealToComplex:
938 case CK_IntegralComplexToReal:
939 case CK_IntegralComplexToBoolean:
940 case CK_IntegralComplexCast:
941 case CK_IntegralComplexToFloatingComplex:
942 case CK_ARCProduceObject:
943 case CK_ARCConsumeObject:
944 case CK_ARCReclaimReturnedObject:
945 case CK_ARCExtendBlockObject:
946 case CK_CopyAndAutoreleaseBlockObject:
947 case CK_BuiltinFnToFnPtr:
948 case CK_ZeroToOCLOpaqueType:
949 case CK_MatrixCast:
950 case CK_HLSLVectorTruncation:
951
952 case CK_IntToOCLSampler:
953 case CK_FloatingToFixedPoint:
954 case CK_FixedPointToFloating:
955 case CK_FixedPointCast:
956 case CK_FixedPointToBoolean:
957 case CK_FixedPointToIntegral:
958 case CK_IntegralToFixedPoint:
959 llvm_unreachable("cast kind invalid for aggregate types");
960 }
961}
962
963void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
964 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
965 EmitAggLoadOfLValue(E);
966 return;
967 }
968
969 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
970 return CGF.EmitCallExpr(E, Slot);
971 });
972}
973
974void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
975 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
976 return CGF.EmitObjCMessageExpr(E, Slot);
977 });
978}
979
980void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
981 CGF.EmitIgnoredExpr(E->getLHS());
982 Visit(E->getRHS());
983}
984
985void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
986 CodeGenFunction::StmtExprEvaluation eval(CGF);
987 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
988}
989
994};
995
// Emit a scalar comparison (<, >, or ==) between LHS and RHS for use by the
// three-way-comparison lowering in VisitBinCmp.  For complex operands the
// caller passes each component separately (NameSuffix distinguishes them);
// member pointers are delegated to the C++ ABI.
// NOTE(review): this listing elides original line 1007 (the start of the
// member-pointer comparison call whose arguments appear on line 1008).
 996static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
 997                                const BinaryOperator *E, llvm::Value *LHS,
 998                                llvm::Value *RHS, CompareKind Kind,
 999                                const char *NameSuffix = "") {
1000  QualType ArgTy = E->getLHS()->getType();
1001  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
1002    ArgTy = CT->getElementType();
1003
1004  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1005    assert(Kind == CK_Equal &&
1006           "member pointers may only be compared for equality");
1008        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1009  }
1010
1011  // Compute the comparison instructions for the specified comparison kind.
1012  struct CmpInstInfo {
1013    const char *Name;
1014    llvm::CmpInst::Predicate FCmp;
1015    llvm::CmpInst::Predicate SCmp;
1016    llvm::CmpInst::Predicate UCmp;
1017  };
  // Immediately-invoked lambda maps the CompareKind to the instruction name
  // plus the float / signed-int / unsigned-int predicates to use.
1018  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1019    using FI = llvm::FCmpInst;
1020    using II = llvm::ICmpInst;
1021    switch (Kind) {
1022    case CK_Less:
1023      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1024    case CK_Greater:
1025      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1026    case CK_Equal:
1027      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1028    }
1029    llvm_unreachable("Unrecognised CompareKind enum");
1030  }();
1031
1032  if (ArgTy->hasFloatingRepresentation())
1033    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1034                              llvm::Twine(InstInfo.Name) + NameSuffix);
1035  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    // Signedness picks the integer predicate; pointers compare unsigned.
1036    auto Inst =
1037        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1038    return Builder.CreateICmp(Inst, LHS, RHS,
1039                              llvm::Twine(InstInfo.Name) + NameSuffix);
1040  }
1041
1042  llvm_unreachable("unsupported aggregate binary expression should have "
1043                   "already been handled");
1044}
1045
// Lower a rewritten three-way comparison (operator<=>) whose result is one
// of the std comparison-category class types: evaluate both operands,
// compute the category's constant via nested selects, and store it into the
// category object's single field in the destination slot.
// NOTE(review): this listing elides original lines 1053 (the initializer of
// CmpInfo) and 1119 (the declaration of FieldLV used on lines 1120-1121).
1046void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1047  using llvm::BasicBlock;
1048  using llvm::PHINode;
1049  using llvm::Value;
1050  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1051                                      E->getRHS()->getType()));
1052  const ComparisonCategoryInfo &CmpInfo =
1054  assert(CmpInfo.Record->isTriviallyCopyable() &&
1055         "cannot copy non-trivially copyable aggregate");
1056
1057  QualType ArgTy = E->getLHS()->getType();
1058
1059  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1060      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1061      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1062    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1063  }
1064  bool IsComplex = ArgTy->isAnyComplexType();
1065
1066  // Evaluate the operands to the expression and extract their values.
  // Scalars and aggregates yield a single value (second == nullptr);
  // complex operands yield a (real, imag) pair.
1067  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1068    RValue RV = CGF.EmitAnyExpr(E);
1069    if (RV.isScalar())
1070      return {RV.getScalarVal(), nullptr};
1071    if (RV.isAggregate())
1072      return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1073    assert(RV.isComplex());
1074    return RV.getComplexVal();
1075  };
1076  auto LHSValues = EmitOperand(E->getLHS()),
1077       RHSValues = EmitOperand(E->getRHS());
1078
  // For complex operands only equality is possible, and both components
  // must compare equal.
1079  auto EmitCmp = [&](CompareKind K) {
1080    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1081                             K, IsComplex ? ".r" : "");
1082    if (!IsComplex)
1083      return Cmp;
1084    assert(K == CompareKind::CK_Equal);
1085    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1086                                 RHSValues.second, K, ".i");
1087    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1088  };
1089  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1090    return Builder.getInt(VInfo->getIntValue());
1091  };
1092
1093  Value *Select;
1094  if (ArgTy->isNullPtrType()) {
    // nullptr_t values always compare equal.
1095    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1096  } else if (!CmpInfo.isPartial()) {
    // Total ordering: eq ? equal : (lt ? less : greater).
1097    Value *SelectOne =
1098        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1099                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1100    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1101                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1102                                  SelectOne, "sel.eq");
1103  } else {
    // Partial ordering: lt ? less : (gt ? greater : (eq ? equal : unordered)).
1104    Value *SelectEq = Builder.CreateSelect(
1105        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1106        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1107    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1108                                           EmitCmpRes(CmpInfo.getGreater()),
1109                                           SelectEq, "sel.gt");
1110    Select = Builder.CreateSelect(
1111        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1112  }
1113  // Create the return value in the destination slot.
1114  EnsureDest(E->getType());
1115  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1116
1117  // Emit the address of the first (and only) field in the comparison category
1118  // type, and initialize it from the constant integer value selected above.
1120      DestLV, *CmpInfo.Record->field_begin());
1121  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1122
1123  // All done! The result is in the Dest slot.
1124}
1125
1126void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1127 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1128 VisitPointerToDataMemberBinaryOperator(E);
1129 else
1130 CGF.ErrorUnsupported(E, "aggregate binary expression");
1131}
1132
// Emit a pointer-to-data-member access (.* / ->*) and copy the referenced
// aggregate into the destination slot.
// NOTE(review): this listing elides original line 1135, which computes the
// LValue `LV` consumed on line 1136.
1133void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1134    const BinaryOperator *E) {
1136  EmitFinalDestCopy(E->getType(), LV);
1137}
1138
1139/// Is the value of the given expression possibly a reference to or
1140/// into a __block variable?
1141static bool isBlockVarRef(const Expr *E) {
1142 // Make sure we look through parens.
1143 E = E->IgnoreParens();
1144
1145 // Check for a direct reference to a __block variable.
1146 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1147 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1148 return (var && var->hasAttr<BlocksAttr>());
1149 }
1150
1151 // More complicated stuff.
1152
1153 // Binary operators.
1154 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1155 // For an assignment or pointer-to-member operation, just care
1156 // about the LHS.
1157 if (op->isAssignmentOp() || op->isPtrMemOp())
1158 return isBlockVarRef(op->getLHS());
1159
1160 // For a comma, just care about the RHS.
1161 if (op->getOpcode() == BO_Comma)
1162 return isBlockVarRef(op->getRHS());
1163
1164 // FIXME: pointer arithmetic?
1165 return false;
1166
1167 // Check both sides of a conditional operator.
1168 } else if (const AbstractConditionalOperator *op
1169 = dyn_cast<AbstractConditionalOperator>(E)) {
1170 return isBlockVarRef(op->getTrueExpr())
1171 || isBlockVarRef(op->getFalseExpr());
1172
1173 // OVEs are required to support BinaryConditionalOperators.
1174 } else if (const OpaqueValueExpr *op
1175 = dyn_cast<OpaqueValueExpr>(E)) {
1176 if (const Expr *src = op->getSourceExpr())
1177 return isBlockVarRef(src);
1178
1179 // Casts are necessary to get things like (*(int*)&var) = foo().
1180 // We don't really care about the kind of cast here, except
1181 // we don't want to look through l2r casts, because it's okay
1182 // to get the *value* in a __block variable.
1183 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1184 if (cast->getCastKind() == CK_LValueToRValue)
1185 return false;
1186 return isBlockVarRef(cast->getSubExpr());
1187
1188 // Handle unary operators. Again, just aggressively look through
1189 // it, ignoring the operation.
1190 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1191 return isBlockVarRef(uop->getSubExpr());
1192
1193 // Look into the base of a field access.
1194 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1195 return isBlockVarRef(mem->getBase());
1196
1197 // Look into the base of a subscript.
1198 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1199 return isBlockVarRef(sub->getBase());
1200 }
1201
1202 return false;
1203}
1204
// Emit an aggregate assignment: evaluate the RHS directly into the LHS
// storage (or, for __block/atomic special cases, into Dest first) and then
// copy into Dest if the expression's own result is used.
// NOTE(review): this listing elides several original lines: 1227 and 1246
// (second operand of the `isAtomicType() ||` conditions), 1233/1235-1236
// (middle arguments of the EmitCopy call), 1254/1256 (the declaration and
// trailing arguments of `LHSSlot`), and 1268-1269 (the destructor-push call
// guarded by line 1267).
1205void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1206  // For an assignment to work, the value on the right has
1207  // to be compatible with the value on the left.
1208  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1209                                                 E->getRHS()->getType())
1210         && "Invalid assignment");
1211
1212  // If the LHS might be a __block variable, and the RHS can
1213  // potentially cause a block copy, we need to evaluate the RHS first
1214  // so that the assignment goes the right place.
1215  // This is pretty semantically fragile.
1216  if (isBlockVarRef(E->getLHS()) &&
1217      E->getRHS()->HasSideEffects(CGF.getContext())) {
1218    // Ensure that we have a destination, and evaluate the RHS into that.
1219    EnsureDest(E->getRHS()->getType());
1220    Visit(E->getRHS());
1221
1222    // Now emit the LHS and copy into it.
1223    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1224
1225    // That copy is an atomic copy if the LHS is atomic.
1226    if (LHS.getType()->isAtomicType() ||
1228      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1229      return;
1230    }
1231
1232    EmitCopy(E->getLHS()->getType(),
1234                                     needsGC(E->getLHS()->getType()),
1237             Dest);
1238    return;
1239  }
1240
1241  LValue LHS = CGF.EmitLValue(E->getLHS());
1242
1243  // If we have an atomic type, evaluate into the destination and then
1244  // do an atomic copy.
1245  if (LHS.getType()->isAtomicType() ||
1247    EnsureDest(E->getRHS()->getType());
1248    Visit(E->getRHS());
1249    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1250    return;
1251  }
1252
1253  // Codegen the RHS so that it stores directly into the LHS.
1255      LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1257  // A non-volatile aggregate destination might have volatile member.
1258  if (!LHSSlot.isVolatile() &&
1259      CGF.hasVolatileMember(E->getLHS()->getType()))
1260    LHSSlot.setVolatile(true);
1261
1262  CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1263
1264  // Copy into the destination if the assignment isn't ignored.
1265  EmitFinalDestCopy(E->getType(), LHS);
1266
1267  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1270                  E->getType());
1271}
1272
// Emit a (possibly GNU binary) conditional operator whose result is an
// aggregate: branch on the condition and emit whichever arm is taken into
// the same destination slot.
// NOTE(review): this listing elides several original lines: 1290 (second
// operand of the `destructNonTrivialCStruct` condition), 1296/1299 (the
// profile-counter condition and its else-branch statement), 1314 (condition
// guarding the false-arm counter increment), 1320 (the destructor-push call
// whose argument continues on line 1321), and 1324-1325 (trailing
// statements before the closing brace).
1273void AggExprEmitter::
1274VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1275  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1276  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1277  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1278
1279  // Bind the common expression if necessary.
1280  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1281
1282  CodeGenFunction::ConditionalEvaluation eval(CGF);
1283  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1284                           CGF.getProfileCount(E));
1285
1286  // Save whether the destination's lifetime is externally managed.
1287  bool isExternallyDestructed = Dest.isExternallyDestructed();
1288  bool destructNonTrivialCStruct =
1289      !isExternallyDestructed &&
1291  isExternallyDestructed |= destructNonTrivialCStruct;
1292  Dest.setExternallyDestructed(isExternallyDestructed);
1293
1294  eval.begin(CGF);
1295  CGF.EmitBlock(LHSBlock);
1297    CGF.incrementProfileCounter(E->getTrueExpr());
1298  else
1300  Visit(E->getTrueExpr());
1301  eval.end(CGF);
1302
1303  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1304  CGF.Builder.CreateBr(ContBlock);
1305
1306  // If the result of an agg expression is unused, then the emission
1307  // of the LHS might need to create a destination slot.  That's fine
1308  // with us, and we can safely emit the RHS into the same slot, but
1309  // we shouldn't claim that it's already being destructed.
1310  Dest.setExternallyDestructed(isExternallyDestructed);
1311
1312  eval.begin(CGF);
1313  CGF.EmitBlock(RHSBlock);
1315    CGF.incrementProfileCounter(E->getFalseExpr());
1316  Visit(E->getFalseExpr());
1317  eval.end(CGF);
1318
1319  if (destructNonTrivialCStruct)
1321                E->getType());
1322
1323  CGF.EmitBlock(ContBlock);
1326}
1327
1328void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1329 Visit(CE->getChosenSubExpr());
1330}
1331
1332void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1333 Address ArgValue = Address::invalid();
1334 CGF.EmitVAArg(VE, ArgValue, Dest);
1335
1336 // If EmitVAArg fails, emit an error.
1337 if (!ArgValue.isValid()) {
1338 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1339 return;
1340 }
1341}
1342
// Emit a C++ temporary binding: evaluate the subexpression into the slot
// and register the temporary's destructor unless the slot's lifetime is
// already managed externally.
// NOTE(review): this listing elides original line 1350 (the statement
// promised by the comment on line 1349, presumably marking Dest as
// externally destructed — confirm against the full source).
1343void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1344  // Ensure that we have a slot, but if we already do, remember
1345  // whether it was externally destructed.
1346  bool wasExternallyDestructed = Dest.isExternallyDestructed();
1347  EnsureDest(E->getType());
1348
1349  // We're going to push a destructor if there isn't already one.
1351
1352  Visit(E->getSubExpr());
1353
1354  // Push that destructor we promised.
1355  if (!wasExternallyDestructed)
1356    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1357}
1358
1359void
1360AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1361 AggValueSlot Slot = EnsureSlot(E->getType());
1362 CGF.EmitCXXConstructExpr(E, Slot);
1363}
1364
// Emit construction via an inherited (using-declared) constructor into the
// destination slot.
// NOTE(review): this listing elides original line 1368 (the call whose
// arguments appear on lines 1369-1370).
1365void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1366    const CXXInheritedCtorInitExpr *E) {
1367  AggValueSlot Slot = EnsureSlot(E->getType());
1369      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1370      E->inheritedFromVBase(), E);
1371}
1372
// Emit a lambda expression by initializing each field of its closure class
// from the corresponding capture initializer.
// NOTE(review): this listing elides original line 1400 (the destructor-push
// call whose trailing arguments appear on lines 1401-1402).
1373void
1374AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1375  AggValueSlot Slot = EnsureSlot(E->getType());
1376  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1377
1378  // We'll need to enter cleanup scopes in case any of the element
1379  // initializers throws an exception or contains branch out of the expressions.
1380  CodeGenFunction::CleanupDeactivationScope scope(CGF);
1381
  // Walk the closure class's fields and the capture initializers in lockstep.
1382  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1383  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1384                                               e = E->capture_init_end();
1385       i != e; ++i, ++CurField) {
1386    // Emit initialization
1387    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1388    if (CurField->hasCapturedVLAType()) {
      // Captured VLA bounds are stored specially, not via an initializer.
1389      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1390      continue;
1391    }
1392
1393    EmitInitializationToLValue(*i, LV);
1394
1395    // Push a destructor if necessary.
1396    if (QualType::DestructionKind DtorKind =
1397            CurField->getType().isDestructedType()) {
1398      assert(LV.isSimple());
1399      if (DtorKind)
1401                                  CurField->getType(),
1402                                  CGF.getDestroyer(DtorKind), false);
1403    }
1404  }
1405}
1406
1407void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1408 CodeGenFunction::RunCleanupsScope cleanups(CGF);
1409 Visit(E->getSubExpr());
1410}
1411
1412void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1413 QualType T = E->getType();
1414 AggValueSlot Slot = EnsureSlot(T);
1415 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1416}
1417
1418void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1419 QualType T = E->getType();
1420 AggValueSlot Slot = EnsureSlot(T);
1421 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1422}
1423
1424/// Determine whether the given cast kind is known to always convert values
1425/// with all zero bits in their value representation to values with all zero
1426/// bits in their value representation.
1427static bool castPreservesZero(const CastExpr *CE) {
1428 switch (CE->getCastKind()) {
1429 // No-ops.
1430 case CK_NoOp:
1431 case CK_UserDefinedConversion:
1432 case CK_ConstructorConversion:
1433 case CK_BitCast:
1434 case CK_ToUnion:
1435 case CK_ToVoid:
1436 // Conversions between (possibly-complex) integral, (possibly-complex)
1437 // floating-point, and bool.
1438 case CK_BooleanToSignedIntegral:
1439 case CK_FloatingCast:
1440 case CK_FloatingComplexCast:
1441 case CK_FloatingComplexToBoolean:
1442 case CK_FloatingComplexToIntegralComplex:
1443 case CK_FloatingComplexToReal:
1444 case CK_FloatingRealToComplex:
1445 case CK_FloatingToBoolean:
1446 case CK_FloatingToIntegral:
1447 case CK_IntegralCast:
1448 case CK_IntegralComplexCast:
1449 case CK_IntegralComplexToBoolean:
1450 case CK_IntegralComplexToFloatingComplex:
1451 case CK_IntegralComplexToReal:
1452 case CK_IntegralRealToComplex:
1453 case CK_IntegralToBoolean:
1454 case CK_IntegralToFloating:
1455 // Reinterpreting integers as pointers and vice versa.
1456 case CK_IntegralToPointer:
1457 case CK_PointerToIntegral:
1458 // Language extensions.
1459 case CK_VectorSplat:
1460 case CK_MatrixCast:
1461 case CK_NonAtomicToAtomic:
1462 case CK_AtomicToNonAtomic:
1463 case CK_HLSLVectorTruncation:
1464 return true;
1465
1466 case CK_BaseToDerivedMemberPointer:
1467 case CK_DerivedToBaseMemberPointer:
1468 case CK_MemberPointerToBoolean:
1469 case CK_NullToMemberPointer:
1470 case CK_ReinterpretMemberPointer:
1471 // FIXME: ABI-dependent.
1472 return false;
1473
1474 case CK_AnyPointerToBlockPointerCast:
1475 case CK_BlockPointerToObjCPointerCast:
1476 case CK_CPointerToObjCPointerCast:
1477 case CK_ObjCObjectLValueCast:
1478 case CK_IntToOCLSampler:
1479 case CK_ZeroToOCLOpaqueType:
1480 // FIXME: Check these.
1481 return false;
1482
1483 case CK_FixedPointCast:
1484 case CK_FixedPointToBoolean:
1485 case CK_FixedPointToFloating:
1486 case CK_FixedPointToIntegral:
1487 case CK_FloatingToFixedPoint:
1488 case CK_IntegralToFixedPoint:
1489 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1490 return false;
1491
1492 case CK_AddressSpaceConversion:
1493 case CK_BaseToDerived:
1494 case CK_DerivedToBase:
1495 case CK_Dynamic:
1496 case CK_NullToPointer:
1497 case CK_PointerToBoolean:
1498 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1499 // same representation in all involved address spaces.
1500 return false;
1501
1502 case CK_ARCConsumeObject:
1503 case CK_ARCExtendBlockObject:
1504 case CK_ARCProduceObject:
1505 case CK_ARCReclaimReturnedObject:
1506 case CK_CopyAndAutoreleaseBlockObject:
1507 case CK_ArrayToPointerDecay:
1508 case CK_FunctionToPointerDecay:
1509 case CK_BuiltinFnToFnPtr:
1510 case CK_Dependent:
1511 case CK_LValueBitCast:
1512 case CK_LValueToRValue:
1513 case CK_LValueToRValueBitCast:
1514 case CK_UncheckedDerivedToBase:
1515 case CK_HLSLArrayRValue:
1516 return false;
1517 }
1518 llvm_unreachable("Unhandled clang::CastKind enum");
1519}
1520
1521/// isSimpleZero - If emitting this value will obviously just cause a store of
1522/// zero to memory, return true. This can return false if uncertain, so it just
1523/// handles simple cases.
// NOTE(review): this listing elides original lines 1540 (second operand of
// the `&&` on line 1539) and 1545 (middle operand of the `&&` chain on
// lines 1544-1546).
1524static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  // Peel off any zero-preserving casts before classifying the expression.
1525  E = E->IgnoreParens();
1526  while (auto *CE = dyn_cast<CastExpr>(E)) {
1527    if (!castPreservesZero(CE))
1528      break;
1529    E = CE->getSubExpr()->IgnoreParens();
1530  }
1531
1532  // 0
1533  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1534    return IL->getValue() == 0;
1535  // +0.0
1536  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1537    return FL->getValue().isPosZero();
1538  // int()
1539  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1541    return true;
1542  // (int*)0 - Null pointer expressions.
1543  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1544    return ICE->getCastKind() == CK_NullToPointer &&
1546        !E->HasSideEffects(CGF.getContext());
1547  // '\0'
1548  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1549    return CL->getValue() == 0;
1550
1551  // Otherwise, hard case: conservatively return false.
1552  return false;
1553}
1554
1555
// Initialize the given lvalue from the expression E, skipping work that is
// already covered by a pre-zeroed destination and handling the special
// no-op / null-init / reference-binding cases before delegating to
// CodeGenFunction for the general path.
// NOTE(review): this listing elides original line 1570 (the declaration of
// `RV`, presumably the bound reference, used on line 1571).
1556void
1557AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1558  QualType type = LV.getType();
1559  // FIXME: Ignore result?
1560  // FIXME: Are initializers affected by volatile?
1561  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1562    // Storing "i32 0" to a zero'd memory location is a noop.
1563    return;
1564  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1565    return EmitNullInitializationToLValue(LV);
1566  } else if (isa<NoInitExpr>(E)) {
1567    // Do nothing.
1568    return;
1569  } else if (type->isReferenceType()) {
1571    return CGF.EmitStoreThroughLValue(RV, LV);
1572  }
1573
1574  CGF.EmitInitializationToLValue(E, LV, Dest.isZeroed());
1575}
1576
// Store the null/zero value of the lvalue's type into it, unless the
// destination was already zero-filled.
// NOTE(review): this listing elides original lines 1591 (the bitfield store
// inside the `isBitField()` branch) and 1600 (the null-initialization call
// in the aggregate branch).
1577void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1578  QualType type = lv.getType();
1579
1580  // If the destination slot is already zeroed out before the aggregate is
1581  // copied into it, we don't have to emit any zeros here.
1582  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1583    return;
1584
1585  if (CGF.hasScalarEvaluationKind(type)) {
1586    // For non-aggregates, we can store the appropriate null constant.
1587    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1588    // Note that the following is not equivalent to
1589    // EmitStoreThroughBitfieldLValue for ARC types.
1590    if (lv.isBitField()) {
1592    } else {
1593      assert(lv.isSimple());
1594      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1595    }
1596  } else {
1597    // There's a potential optimization opportunity in combining
1598    // memsets; that would be easy for arrays, but relatively
1599    // difficult for structures with the current code.
1601  }
1602}
1603
1604void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1605 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1606 E->getInitializedFieldInUnion(),
1607 E->getArrayFiller());
1608}
1609
1610void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1611 if (E->hadArrayRangeDesignator())
1612 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1613
1614 if (E->isTransparent())
1615 return Visit(E->getInit(0));
1616
1617 VisitCXXParenListOrInitListExpr(
1618 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1619}
1620
// Shared emission path for braced init lists and C++20 parenthesized
// aggregate initialization: dispatches to array emission, handles VLAs and
// unions specially, and otherwise initializes a record field by field,
// pushing destructors and zero-initializing padding as required.
// NOTE(review): this listing elides several original lines: 1673 (statement
// above the CleanupDeactivationScope), 1685 and 1688/1690-1692 (the base
// subobject address computation and the AggValueSlot::forAddr arguments in
// the base-class loop), and 1792 (the destructor-push call whose trailing
// arguments appear on lines 1793-1794).
1621void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1622    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1623    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1624#if 0
1625  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1626  // (Length of globals? Chunks of zeroed-out space?).
1627  //
1628  // If we can, prefer a copy from a global; this is a lot less code for long
1629  // globals, and it's easier for the current optimizers to analyze.
1630  if (llvm::Constant *C =
1631          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1632    llvm::GlobalVariable* GV =
1633    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1634                             llvm::GlobalValue::InternalLinkage, C, "");
1635    EmitFinalDestCopy(ExprToVisit->getType(),
1636                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1637    return;
1638  }
1639#endif
1640
1641  AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1642
1643  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1644
1645  // Handle initialization of an array.
1646  if (ExprToVisit->getType()->isConstantArrayType()) {
1647    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1648    EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1649                  InitExprs, ArrayFiller);
1650    return;
1651  } else if (ExprToVisit->getType()->isVariableArrayType()) {
1652    // A variable array type that has an initializer can only do empty
1653    // initialization. And because this feature is not exposed as an extension
1654    // in C++, we can safely memset the array memory to zero.
1655    assert(InitExprs.size() == 0 &&
1656           "you can only use an empty initializer with VLAs");
1657    CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1658    return;
1659  }
1660
1661  assert(ExprToVisit->getType()->isRecordType() &&
1662         "Only support structs/unions here!");
1663
1664  // Do struct initialization; this code just sets each individual member
1665  // to the approprate value.  This makes bitfield support automatic;
1666  // the disadvantage is that the generated code is more difficult for
1667  // the optimizer, especially with bitfields.
1668  unsigned NumInitElements = InitExprs.size();
1669  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
1670
1671  // We'll need to enter cleanup scopes in case any of the element
1672  // initializers throws an exception.
1674  CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1675
1676  unsigned curInitIndex = 0;
1677
1678  // Emit initialization of base classes.
1679  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1680    assert(NumInitElements >= CXXRD->getNumBases() &&
1681           "missing initializer for base class");
1682    for (auto &Base : CXXRD->bases()) {
1683      assert(!Base.isVirtual() && "should not see vbases here");
1684      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1686          Dest.getAddress(), CXXRD, BaseRD,
1687          /*isBaseVirtual*/ false);
1689          V, Qualifiers(),
1693          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1694      CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1695
      // Make sure the emitted base subobject is destroyed if a later
      // initializer throws.
1696      if (QualType::DestructionKind dtorKind =
1697              Base.getType().isDestructedType())
1698        CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1699    }
1700  }
1701
1702  // Prepare a 'this' for CXXDefaultInitExprs.
1703  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1704
1705  const bool ZeroInitPadding =
1706      CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed();
1707
1708  if (record->isUnion()) {
1709    // Only initialize one field of a union. The field itself is
1710    // specified by the initializer list.
1711    if (!InitializedFieldInUnion) {
1712      // Empty union; we have nothing to do.
1713
1714#ifndef NDEBUG
1715      // Make sure that it's really an empty and not a failure of
1716      // semantic analysis.
1717      for (const auto *Field : record->fields())
1718        assert(
1719            (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1720            "Only unnamed bitfields or anonymous class allowed");
1721#endif
1722      return;
1723    }
1724
1725    // FIXME: volatility
1726    FieldDecl *Field = InitializedFieldInUnion;
1727
1728    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1729    if (NumInitElements) {
1730      // Store the initializer into the field
1731      EmitInitializationToLValue(InitExprs[0], FieldLoc);
1732      if (ZeroInitPadding) {
        // Zero everything past the initialized field up to the union's size.
1733        uint64_t TotalSize = CGF.getContext().toBits(
1734            Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1735        uint64_t FieldSize = CGF.getContext().getTypeSize(FieldLoc.getType());
1736        DoZeroInitPadding(FieldSize, TotalSize, nullptr);
1737      }
1738    } else {
1739      // Default-initialize to null.
1740      if (ZeroInitPadding)
1741        EmitNullInitializationToLValue(DestLV);
1742      else
1743        EmitNullInitializationToLValue(FieldLoc);
1744    }
1745    return;
1746  }
1747
1748  // Here we iterate over the fields; this makes it simpler to both
1749  // default-initialize fields and skip over unnamed fields.
1750  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(record);
1751  uint64_t PaddingStart = 0;
1752
1753  for (const auto *field : record->fields()) {
1754    // We're done once we hit the flexible array member.
1755    if (field->getType()->isIncompleteArrayType())
1756      break;
1757
1758    // Always skip anonymous bitfields.
1759    if (field->isUnnamedBitField())
1760      continue;
1761
1762    // We're done if we reach the end of the explicit initializers, we
1763    // have a zeroed object, and the rest of the fields are
1764    // zero-initializable.
1765    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1766        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1767      break;
1768
1769    if (ZeroInitPadding)
1770      DoZeroInitPadding(PaddingStart,
1771                        Layout.getFieldOffset(field->getFieldIndex()), field);
1772
1773    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1774    // We never generate write-barries for initialized fields.
1775    LV.setNonGC(true);
1776
1777    if (curInitIndex < NumInitElements) {
1778      // Store the initializer into the field.
1779      EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1780    } else {
1781      // We're out of initializers; default-initialize to null
1782      EmitNullInitializationToLValue(LV);
1783    }
1784
1785    // Push a destructor if necessary.
1786    // FIXME: if we have an array of structures, all explicitly
1787    // initialized, we can end up pushing a linear number of cleanups.
1788    if (QualType::DestructionKind dtorKind
1789          = field->getType().isDestructedType()) {
1790      assert(LV.isSimple());
1791      if (dtorKind) {
1793                                    field->getType(),
1794                                    CGF.getDestroyer(dtorKind), false);
1795      }
1796    }
1797  }
  // Zero any tail padding after the last explicitly handled field.
1798  if (ZeroInitPadding) {
1799    uint64_t TotalSize = CGF.getContext().toBits(
1800        Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
1801    DoZeroInitPadding(PaddingStart, TotalSize, nullptr);
1802  }
1803}
1804
/// Zero-initialize the bytes in the bit range [PaddingStart, PaddingEnd) of
/// the destination slot, then advance PaddingStart past the next field.
///
/// \param PaddingStart in/out: start of the not-yet-initialized region, in
///        bits from the start of the object. Updated to the first bit after
///        the region this call covers (including NextField, if any).
/// \param PaddingEnd end of the region to zero, in bits.
/// \param NextField the field that follows the padding, or null when zeroing
///        the tail padding at the end of the object.
1805 void AggExprEmitter::DoZeroInitPadding(uint64_t &PaddingStart,
1806 uint64_t PaddingEnd,
1807 const FieldDecl *NextField) {
1808
// Helper: memset-to-zero the byte range corresponding to [StartBit, EndBit).
// Both bounds are expected to be byte-aligned bit offsets.
1809 auto InitBytes = [&](uint64_t StartBit, uint64_t EndBit) {
1810 CharUnits Start = CGF.getContext().toCharUnitsFromBits(StartBit);
1811 CharUnits End = CGF.getContext().toCharUnitsFromBits(EndBit);
1812 Address Addr = Dest.getAddress().withElementType(CGF.CharTy);
1813 if (!Start.isZero())
1814 Addr = Builder.CreateConstGEP(Addr, Start.getQuantity());
1815 llvm::Constant *SizeVal = Builder.getInt64((End - Start).getQuantity());
1816 CGF.Builder.CreateMemSet(Addr, Builder.getInt8(0), SizeVal, false);
1817 };
1818
1819 if (NextField != nullptr && NextField->isBitField()) {
1820 // For bitfield, zero init StorageSize before storing the bits. So we don't
1821 // need to handle big/little endian.
1822 const CGRecordLayout &RL =
1823 CGF.getTypes().getCGRecordLayout(NextField->getParent());
1824 const CGBitFieldInfo &Info = RL.getBitFieldInfo(NextField);
1825 uint64_t StorageStart = CGF.getContext().toBits(Info.StorageOffset);
// Only act if the bit-field's storage unit extends past what has already
// been initialized; adjacent bit-fields can share one storage unit, in
// which case an earlier call already zeroed it.
1826 if (StorageStart + Info.StorageSize > PaddingStart) {
1827 if (StorageStart > PaddingStart)
1828 InitBytes(PaddingStart, StorageStart);
// Store a zero of the full storage-unit width over the bit-field's
// storage, so the subsequent partial-width bit store needs no
// endianness handling.
1829 Address Addr = Dest.getAddress();
1830 if (!Info.StorageOffset.isZero())
1831 Addr = Builder.CreateConstGEP(Addr.withElementType(CGF.CharTy),
1832 Info.StorageOffset.getQuantity());
1833 Addr = Addr.withElementType(
1834 llvm::Type::getIntNTy(CGF.getLLVMContext(), Info.StorageSize));
1835 Builder.CreateStore(Builder.getIntN(Info.StorageSize, 0), Addr);
1836 PaddingStart = StorageStart + Info.StorageSize;
1837 }
1838 return;
1839 }
1840
// Non-bit-field (or tail-padding) case: plain memset of the gap, then skip
// PaddingStart over the next field's own storage.
1841 if (PaddingStart < PaddingEnd)
1842 InitBytes(PaddingStart, PaddingEnd);
1843 if (NextField != nullptr)
1844 PaddingStart =
1845 PaddingEnd + CGF.getContext().getTypeSize(NextField->getType());
1846}
1847
/// Emit an ArrayInitLoopExpr: initialize each element of the destination
/// array by evaluating the sub-expression once per element, with the loop
/// index available through ArrayInitLoopExprScope.
///
/// \param outerBegin for a multidimensional initialization, the start of the
///        outermost array, so that only one partial-array EH cleanup is
///        pushed for the whole nest; null at the top level.
1848 void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1849 llvm::Value *outerBegin) {
1850 // Emit the common subexpression.
1851 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1852
1853 Address destPtr = EnsureSlot(E->getType()).getAddress();
1854 uint64_t numElements = E->getArraySize().getZExtValue();
1855
// Nothing to do for a zero-length array.
1856 if (!numElements)
1857 return;
1858
1859 // destPtr is an array*. Construct an elementType* by drilling down a level.
1860 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1861 llvm::Value *indices[] = {zero, zero};
1862 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
1863 destPtr.emitRawPointer(CGF),
1864 indices, "arrayinit.begin");
1865
1866 // Prepare to special-case multidimensional array initialization: we avoid
1867 // emitting multiple destructor loops in that case.
1868 if (!outerBegin)
1869 outerBegin = begin;
1870 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1871
1872 QualType elementType =
1874 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1875 CharUnits elementAlign =
1876 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1877 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
1878
1879 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1880 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1881
1882 // Jump into the body.
1883 CGF.EmitBlock(bodyBB);
// The index PHI gets two incoming values: zero from the entry block and the
// incremented value from the loop back-edge (added below).
1884 llvm::PHINode *index =
1885 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1886 index->addIncoming(zero, entryBB);
1887 llvm::Value *element =
1888 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
1889
1890 // Prepare for a cleanup.
1891 QualType::DestructionKind dtorKind = elementType.isDestructedType();
// Only push an EH cleanup at the outermost level of a loop nest; an inner
// ArrayInitLoopExpr shares the outer loop's cleanup via outerBegin.
1893 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1894 if (outerBegin->getType() != element->getType())
1895 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1896 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1897 elementAlign,
1898 CGF.getDestroyer(dtorKind));
1900 } else {
1901 dtorKind = QualType::DK_none;
1902 }
1903
1904 // Emit the actual filler expression.
1905 {
1906 // Temporaries created in an array initialization loop are destroyed
1907 // at the end of each iteration.
1908 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1909 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1910 LValue elementLV = CGF.MakeAddrLValue(
1911 Address(element, llvmElementType, elementAlign), elementType);
1912
1913 if (InnerLoop) {
1914 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1915 auto elementSlot = AggValueSlot::forLValue(
1916 elementLV, AggValueSlot::IsDestructed,
1919 AggExprEmitter(CGF, elementSlot, false)
1920 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1921 } else
1922 EmitInitializationToLValue(E->getSubExpr(), elementLV);
1923 }
1924
1925 // Move on to the next element.
// NUW is safe: index < numElements, which fits in a size_t.
1926 llvm::Value *nextIndex = Builder.CreateNUWAdd(
1927 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1928 index->addIncoming(nextIndex, Builder.GetInsertBlock());
1929
1930 // Leave the loop if we're done.
1931 llvm::Value *done = Builder.CreateICmpEQ(
1932 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1933 "arrayinit.done");
1934 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1935 Builder.CreateCondBr(done, endBB, bodyBB);
1936
1937 CGF.EmitBlock(endBB);
1938
1939 // Leave the partial-array cleanup if we entered one.
1940 if (dtorKind)
1941 CGF.DeactivateCleanupBlock(cleanup, index);
1942}
1943
1944void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1945 AggValueSlot Dest = EnsureSlot(E->getType());
1946
1947 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1948 EmitInitializationToLValue(E->getBase(), DestLV);
1949 VisitInitListExpr(E->getUpdater());
1950}
1951
1952//===----------------------------------------------------------------------===//
1953// Entry Points into this File
1954//===----------------------------------------------------------------------===//
1955
1956/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1957/// non-zero bytes that will be stored when outputting the initializer for the
1958/// specified initializer expression.
1960 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1961 E = MTE->getSubExpr();
1963
1964 // 0 and 0.0 won't require any non-zero stores!
1965 if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1966
1967 // If this is an initlist expr, sum up the size of sizes of the (present)
1968 // elements. If this is something weird, assume the whole thing is non-zero.
1969 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1970 while (ILE && ILE->isTransparent())
1971 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1972 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1973 return CGF.getContext().getTypeSizeInChars(E->getType());
1974
1975 // InitListExprs for structs have to be handled carefully. If there are
1976 // reference members, we need to consider the size of the reference, not the
1977 // referencee. InitListExprs for unions and arrays can't have references.
1978 if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1979 if (!RT->isUnionType()) {
1980 RecordDecl *SD = RT->getDecl();
1981 CharUnits NumNonZeroBytes = CharUnits::Zero();
1982
1983 unsigned ILEElement = 0;
1984 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1985 while (ILEElement != CXXRD->getNumBases())
1986 NumNonZeroBytes +=
1987 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1988 for (const auto *Field : SD->fields()) {
1989 // We're done once we hit the flexible array member or run out of
1990 // InitListExpr elements.
1991 if (Field->getType()->isIncompleteArrayType() ||
1992 ILEElement == ILE->getNumInits())
1993 break;
1994 if (Field->isUnnamedBitField())
1995 continue;
1996
1997 const Expr *E = ILE->getInit(ILEElement++);
1998
1999 // Reference values are always non-null and have the width of a pointer.
2000 if (Field->getType()->isReferenceType())
2001 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
2002 CGF.getTarget().getPointerWidth(LangAS::Default));
2003 else
2004 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
2005 }
2006
2007 return NumNonZeroBytes;
2008 }
2009 }
2010
2011 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
2012 CharUnits NumNonZeroBytes = CharUnits::Zero();
2013 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
2014 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
2015 return NumNonZeroBytes;
2016}
2017
2018/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
2019/// zeros in it, emit a memset and avoid storing the individual zeros.
2020///
2022 CodeGenFunction &CGF) {
2023 // If the slot is already known to be zeroed, nothing to do. Don't mess with
2024 // volatile stores.
2025 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
2026 return;
2027
2028 // C++ objects with a user-declared constructor don't need zero'ing.
2029 if (CGF.getLangOpts().CPlusPlus)
2030 if (const RecordType *RT = CGF.getContext()
2032 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
2034 return;
2035 }
2036
2037 // If the type is 16-bytes or smaller, prefer individual stores over memset.
2038 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
2039 if (Size <= CharUnits::fromQuantity(16))
2040 return;
2041
2042 // Check to see if over 3/4 of the initializer are known to be zero. If so,
2043 // we prefer to emit memset + individual stores for the rest.
2044 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2045 if (NumNonZeroBytes*4 > Size)
2046 return;
2047
2048 // Okay, it seems like a good idea to use an initial memset, emit the call.
2049 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2050
2052 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2053
2054 // Tell the AggExprEmitter that the slot is known zero.
2055 Slot.setZeroed();
2056}
2057
2058
2059
2060
2061/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2062/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2063/// the value of the aggregate expression is not needed. If VolatileDest is
2064/// true, DestPtr cannot be 0.
// Entry point: evaluate an aggregate-kind expression into the given slot,
// possibly pre-zeroing the slot with a memset when the initializer is mostly
// zero (see CheckAggExprForMemSetUse).
2065 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
2066 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2067 "Invalid aggregate expression to emit");
2068 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2069 "slot has bits but no address");
2070
2071 // Optimize the slot if possible.
2072 CheckAggExprForMemSetUse(Slot, E, *this);
2073
2074 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
2075}
2076
// Evaluate an aggregate expression into a fresh memory temporary and return
// an LValue for that temporary.
2078 assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2079 Address Temp = CreateMemTemp(E->getType());
2080 LValue LV = MakeAddrLValue(Temp, E->getType());
2085 return LV;
2086}
2087
// Forward to AggExprEmitter::EmitFinalDestCopy to copy an already-emitted
// aggregate value (Src) into the destination slot (Dest).
2089 const LValue &Src,
2090 ExprValueKind SrcKind) {
2091 return AggExprEmitter(*this, Dest, Dest.isIgnored())
2092 .EmitFinalDestCopy(Type, Src, SrcKind);
2093}
2094
// Decide whether initializing field FD may overlap storage that is already
// in use, which matters only for [[no_unique_address]] fields of record
// type: their tail padding may be shared with other subobjects.
2097 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2099
2100 // Empty fields can overlap earlier fields.
2101 if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
2103
2104 // If the field lies entirely within the enclosing class's nvsize, its tail
2105 // padding cannot overlap any already-initialized object. (The only subobjects
2106 // with greater addresses that might already be initialized are vbases.)
2107 const RecordDecl *ClassRD = FD->getParent();
2108 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2109 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2110 getContext().getTypeSize(FD->getType()) <=
2111 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2113
2114 // The tail padding may contain values we need to preserve.
2116}
2117
// Decide whether initializing base class BaseRD of RD may overlap storage
// that is already in use (virtual bases and empty bases can; a base laid out
// fully within the derived class's nvsize cannot).
2119 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2120 // If the most-derived object is a field declared with [[no_unique_address]],
2121 // the tail padding of any virtual base could be reused for other subobjects
2122 // of that field's class.
2123 if (IsVirtual)
2125
2126 // Empty bases can overlap earlier bases.
2127 if (BaseRD->isEmpty())
2129
2130 // If the base class is laid out entirely within the nvsize of the derived
2131 // class, its tail padding cannot yet be initialized, so we can issue
2132 // stores at the full width of the base class.
2133 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2134 if (Layout.getBaseClassOffset(BaseRD) +
2135 getContext().getASTRecordLayout(BaseRD).getSize() <=
2136 Layout.getNonVirtualSize())
2138
2139 // The tail padding may contain values we need to preserve.
2141}
2142
// Copy an aggregate value from Src to Dest, normally via llvm.memcpy.
// MayOverlap selects whether tail padding is excluded from the copy (the
// data size is used instead of the full size); isVolatile marks the memcpy
// volatile when either side is a volatile access.
2144 AggValueSlot::Overlap_t MayOverlap,
2145 bool isVolatile) {
2146 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2147
2148 Address DestPtr = Dest.getAddress();
2149 Address SrcPtr = Src.getAddress();
2150
2151 if (getLangOpts().CPlusPlus) {
2152 if (const RecordType *RT = Ty->getAs<RecordType>()) {
// A raw memcpy is only valid for types that are trivially copyable (in
// one of the listed senses) — anything else must go through a
// constructor/assignment operator instead of this path.
2153 CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
2154 assert((Record->hasTrivialCopyConstructor() ||
2155 Record->hasTrivialCopyAssignment() ||
2156 Record->hasTrivialMoveConstructor() ||
2157 Record->hasTrivialMoveAssignment() ||
2158 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2159 "Trying to aggregate-copy a type without a trivial copy/move "
2160 "constructor or assignment operator");
2161 // Ignore empty classes in C++.
2162 if (Record->isEmpty())
2163 return;
2164 }
2165 }
2166
// CUDA device builtin surface/texture types need a target-specific copy;
// if the target hook handles it, we are done.
2167 if (getLangOpts().CUDAIsDevice) {
2169 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2170 Src))
2171 return;
2172 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2173 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2174 Src))
2175 return;
2176 }
2177 }
2178
2179 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2180 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2181 // read from another object that overlaps in anyway the storage of the first
2182 // object, then the overlap shall be exact and the two objects shall have
2183 // qualified or unqualified versions of a compatible type."
2184 //
2185 // memcpy is not defined if the source and destination pointers are exactly
2186 // equal, but other compilers do this optimization, and almost every memcpy
2187 // implementation handles this case safely. If there is a libc that does not
2188 // safely handle this, we can add a target hook.
2189
2190 // Get data size info for this aggregate. Don't copy the tail padding if this
2191 // might be a potentially-overlapping subobject, since the tail padding might
2192 // be occupied by a different object. Otherwise, copying it is fine.
2194 if (MayOverlap)
2196 else
2198
2199 llvm::Value *SizeVal = nullptr;
2200 if (TypeInfo.Width.isZero()) {
2201 // But note that getTypeInfo returns 0 for a VLA.
// Compute the runtime byte count as (element count) * (element size).
2202 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2203 getContext().getAsArrayType(Ty))) {
2204 QualType BaseEltTy;
2205 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2206 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2207 assert(!TypeInfo.Width.isZero());
2208 SizeVal = Builder.CreateNUWMul(
2209 SizeVal,
2210 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2211 }
2212 }
2213 if (!SizeVal) {
2214 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2215 }
2216
2217 // FIXME: If we have a volatile struct, the optimizer can remove what might
2218 // appear to be `extra' memory ops:
2219 //
2220 // volatile struct { int i; } a, b;
2221 //
2222 // int main() {
2223 // a = b;
2224 // a = b;
2225 // }
2226 //
2227 // we need to use a different call here. We use isVolatile to indicate when
2228 // either the source or the destination is volatile.
2229
2230 DestPtr = DestPtr.withElementType(Int8Ty);
2231 SrcPtr = SrcPtr.withElementType(Int8Ty);
2232
2233 // Don't do any of the memmove_collectable tests if GC isn't set.
2234 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2235 // fall through
2236 } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
// Under ObjC GC, records (or arrays of records) containing object
// members must be copied through the runtime's collectable memmove.
2237 RecordDecl *Record = RecordTy->getDecl();
2238 if (Record->hasObjectMember()) {
2239 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2240 SizeVal);
2241 return;
2242 }
2243 } else if (Ty->isArrayType()) {
2244 QualType BaseType = getContext().getBaseElementType(Ty);
2245 if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
2246 if (RecordTy->getDecl()->hasObjectMember()) {
2247 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2248 SizeVal);
2249 return;
2250 }
2251 }
2252 }
2253
2254 auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2255
2256 // Determine the metadata to describe the position of any padding in this
2257 // memcpy, as well as the TBAA tags for the members of the struct, in case
2258 // the optimizer wishes to expand it in to scalar memory operations.
2259 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2260 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2261
2262 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2264 Dest.getTBAAInfo(), Src.getTBAAInfo());
2265 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2266 }
2267}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3443
CompareKind
Definition: CGExprAgg.cpp:990
@ CK_Greater
Definition: CGExprAgg.cpp:992
@ CK_Less
Definition: CGExprAgg.cpp:991
@ CK_Equal
Definition: CGExprAgg.cpp:993
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF)
GetNumNonZeroBytesInInit - Get an approximate count of the number of non-zero bytes that will be stored when outputting the initializer for the specified initializer expression.
Definition: CGExprAgg.cpp:1959
static Expr * findPeephole(Expr *op, CastKind kind, const ASTContext &ctx)
Attempt to look through various unimportant expressions to find a cast of the given kind.
Definition: CGExprAgg.cpp:730
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
Definition: CGExprAgg.cpp:1141
static bool isTrivialFiller(Expr *E)
Determine if E is a trivial array filler, that is, one that is equivalent to zero-initialization.
Definition: CGExprAgg.cpp:473
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF)
isSimpleZero - If emitting this value will obviously just cause a store of zero to memory,...
Definition: CGExprAgg.cpp:1524
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
Definition: CGExprAgg.cpp:996
static bool castPreservesZero(const CastExpr *CE)
Determine whether the given cast kind is known to always convert values with all zero bits in their v...
Definition: CGExprAgg.cpp:1427
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CodeGenFunction &CGF)
CheckAggExprForMemSetUse - If the initializer is large and has a lot of zeros in it, emit a memset and avoid storing the individual zeros.
Definition: CGExprAgg.cpp:2021
Expr * E
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the C++ template declaration subclasses.
llvm::MachO::Record Record
Definition: MachO.h:31
SourceLocation Loc
Definition: SemaObjC.cpp:759
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a translation unit.
Definition: ASTContext.h:188
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2915
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
bool hasSameType(QualType T1, QualType T2) const
Determine whether the given types T1 and T2 are equivalent.
Definition: ASTContext.h:2732
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
ComparisonCategories CompCategories
Types and expressions required to build C++2a three-way comparisons using operator<=>,...
Definition: ASTContext.h:2413
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
QualType removeAddrSpaceQualType(QualType T) const
Remove any existing address space on the type and returns the type with qualifiers intact (or that's ...
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2763
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2482
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
unsigned getTargetAddressSpace(LangAS AS) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
Definition: RecordLayout.h:210
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4224
Represents a loop initializing the elements of an array.
Definition: Expr.h:5752
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2718
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
QualType getElementType() const
Definition: Type.h:3589
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6678
QualType getValueType() const
Gets the type contained by this atomic type, i.e.
Definition: Type.h:7761
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3909
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1268
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1375
Expr * getExpr()
Get the initialization expression that will be used.
Definition: ExprCXX.cpp:1084
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1737
Represents a list-initialization with parenthesis.
Definition: ExprCXX.h:4960
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition: DeclCXX.cpp:614
bool hasUserDeclaredConstructor() const
Determine whether this class has any user-declared constructors.
Definition: DeclCXX.h:792
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition: DeclCXX.h:1198
A rewritten comparison expression that was originally written using operator syntax.
Definition: ExprCXX.h:283
An expression "T()" which creates an rvalue of a non-class type T.
Definition: ExprCXX.h:2182
Implicit construction of a std::initializer_list<T> object from an array temporary within list-initia...
Definition: ExprCXX.h:797
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1206
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3547
CastKind getCastKind() const
Definition: Expr.h:3591
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition: Expr.h:4641
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition: Expr.h:4677
Represents a 'co_await' expression.
Definition: ExprCXX.h:5191
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
void setVolatile(bool flag)
Definition: CGValue.h:623
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
Address getAddress() const
Definition: CGValue.h:644
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const
Get the preferred size to use when storing a value to this slot.
Definition: CGValue.h:682
NeedsGCBarriers_t requiresGCollection() const
Definition: CGValue.h:634
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:613
void setZeroed(bool V=true)
Definition: CGValue.h:674
IsZeroed_t isZeroed() const
Definition: CGValue.h:675
Qualifiers getQualifiers() const
Definition: CGValue.h:617
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
IsAliased_t isPotentiallyAliased() const
Definition: CGValue.h:654
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
IsDestructed_t isExternallyDestructed() const
Definition: CGValue.h:610
Overlap_t mayOverlap() const
Definition: CGValue.h:658
RValue asRValue() const
Definition: CGValue.h:666
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:640
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:856
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:398
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:365
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:87
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *Size)=0
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
static bool hasScalarEvaluationKind(QualType T)
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void callCStructMoveConstructor(LValue Dst, LValue Src)
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
void callCStructCopyConstructor(LValue Dst, LValue Src)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Type * ConvertTypeForMem(QualType T)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
const TargetInfo & getTarget() const
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfBaseOfCompleteClass - Convert the given pointer to a complete class to the given direct b...
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
llvm::Type * ConvertType(QualType T)
CodeGenTypes & getTypes() const
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
llvm::LLVMContext & getLLVMContext()
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
RValue EmitAtomicExpr(AtomicExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1263
llvm::MDNode * getTBAAStructInfo(QualType QTy)
llvm::Module & getModule() const
bool isPaddedAtomicType(QualType type)
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
TBAAAccessInfo mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, TBAAAccessInfo SrcInfo)
mergeTBAAInfoForMemoryTransfer - Get merged TBAA information for the purposes of memory transfer call...
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
bool isPointerZeroInitializable(QualType T)
Check if the pointer type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
Definition: CGCleanup.h:639
LValue - This represents an lvalue references.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isSimple() const
Definition: CGValue.h:278
Address getAddress() const
Definition: CGValue.h:361
QualType getType() const
Definition: CGValue.h:291
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:335
void setNonGC(bool Value)
Definition: CGValue.h:304
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
llvm::Value * getAggregatePointer(QualType PointeeType, CodeGenFunction &CGF) const
Definition: CGValue.h:88
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:125
bool isAggregate() const
Definition: CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
bool isComplex() const
Definition: CGValue.h:65
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:78
An abstract representation of an aligned address.
Definition: Address.h:42
llvm::Value * getPointer() const
Definition: Address.h:66
static RawAddress invalid()
Definition: Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
const ComparisonCategoryInfo & getInfoForType(QualType Ty) const
Return the comparison category information as specified by getCategoryForType(Ty).
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
Complex values, per C99 6.2.5p11.
Definition: Type.h:3145
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3477
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:3615
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5272
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2369
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
bool hasAttr() const
Definition: DeclBase.h:580
Represents an expression – generally a full-expression – that introduces cleanups to be run at the en...
Definition: ExprCXX.h:3474
This represents one expression.
Definition: Expr.h:110
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3117
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3086
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3587
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3033
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3124
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.cpp:4654
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3250
Represents a C11 generic selection.
Definition: Expr.h:5966
Represents an implicitly-generated value initialization of an object of a given type.
Definition: Expr.h:5841
Describes an C or C++ initializer list.
Definition: Expr.h:5088
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition: Expr.cpp:2460
unsigned getNumInits() const
Definition: Expr.h:5118
const Expr * getInit(unsigned Init) const
Definition: Expr.h:5134
A C++ lambda expression, which produces a function object (of unspecified type) that can be invoked l...
Definition: ExprCXX.h:1954
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: ExprCXX.h:2066
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4734
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3236
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3519
Represents a place-holder for an object not to be initialized by anything.
Definition: Expr.h:5661
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:941
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
bool isUnique() const
Definition: Expr.h:1231
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2170
const Expr * getSubExpr() const
Definition: Expr.h:2187
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1991
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6546
A (possibly-)qualified type.
Definition: Type.h:929
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:8015
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2796
@ DK_nontrivial_c_struct
Definition: Type.h:1524
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:8057
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1531
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition: Type.cpp:2641
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition: Type.h:1503
The collection of all-type qualifiers we support.
Definition: Type.h:324
Represents a struct/union/class.
Definition: Decl.h:4148
bool hasObjectMember() const
Definition: Decl.h:4208
field_range fields() const
Definition: Decl.h:4354
field_iterator field_begin() const
Definition: Decl.cpp:5092
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6072
RecordDecl * getDecl() const
Definition: Type.h:6082
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
RetTy Visit(PTR(Stmt) S, ParamTys... P)
Definition: StmtVisitor.h:44
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:185
Stmt - This represents one statement.
Definition: Stmt.h:84
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
Represents a reference to a non-type template parameter that has been substituted with a template arg...
Definition: ExprCXX.h:4490
bool isUnion() const
Definition: Decl.h:3770
uint64_t getPointerWidth(LangAS AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:478
The base class of the type hierarchy.
Definition: Type.h:1828
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1916
bool isConstantArrayType() const
Definition: Type.h:8262
bool isArrayType() const
Definition: Type.h:8258
bool isPointerType() const
Definition: Type.h:8186
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8800
bool isVariableArrayType() const
Definition: Type.h:8270
bool isCUDADeviceBuiltinSurfaceType() const
Check if the type is the CUDA device builtin surface type.
Definition: Type.cpp:5072
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition: Type.h:8625
bool isAnyComplexType() const
Definition: Type.h:8294
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition: Type.cpp:2220
bool isMemberPointerType() const
Definition: Type.h:8240
bool isAtomicType() const
Definition: Type.h:8341
bool isCUDADeviceBuiltinTextureType() const
Check if the type is the CUDA device builtin texture type.
Definition: Type.cpp:5079
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
Definition: Type.cpp:2292
bool isRealFloatingType() const
Floating point categories.
Definition: Type.cpp:2300
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8731
bool isNullPtrType() const
Definition: Type.h:8543
bool isRecordType() const
Definition: Type.h:8286
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2232
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4750
QualType getType() const
Definition: Decl.h:682
Represents a variable declaration or definition.
Definition: Decl.h:882
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< AtomicType > atomicType
Matches atomic types.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:3869
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2408
bool GE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1186
The JSON file list parser is used to communicate input to InstallAPI.
@ CPlusPlus
Definition: LangStandard.h:55
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
CastKind
CastKind - The kind of operation required for a conversion.
ExprValueKind
The categorization of expression values, currently following the C++11 scheme.
Definition: Specifiers.h:132
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition: Address.h:325
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * CharTy
char
uint64_t Width
Definition: ASTContext.h:159