1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGObjCRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "ConstantEmitter.h"
18#include "EHScopeStack.h"
19#include "TargetInfo.h"
21#include "clang/AST/Attr.h"
22#include "clang/AST/DeclCXX.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/GlobalVariable.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/IntrinsicInst.h"
30#include "llvm/IR/Intrinsics.h"
31using namespace clang;
32using namespace CodeGen;
33
34//===----------------------------------------------------------------------===//
35// Aggregate Expression Emitter
36//===----------------------------------------------------------------------===//
37
38namespace llvm {
39extern cl::opt<bool> EnableSingleByteCoverage;
40} // namespace llvm
41
42namespace {
43class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
44 CodeGenFunction &CGF;
45 CGBuilderTy &Builder;
46 AggValueSlot Dest;
47 bool IsResultUnused;
48
49 AggValueSlot EnsureSlot(QualType T) {
50 if (!Dest.isIgnored()) return Dest;
51 return CGF.CreateAggTemp(T, "agg.tmp.ensured");
52 }
53 void EnsureDest(QualType T) {
54 if (!Dest.isIgnored()) return;
55 Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
56 }
57
58 // Calls `Fn` with a valid return value slot, potentially creating a temporary
59 // to do so. If a temporary is created, an appropriate copy into `Dest` will
60 // be emitted, as will lifetime markers.
61 //
62 // The given function should take a ReturnValueSlot, and return an RValue that
63 // points to said slot.
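  // Illustrative sketch (added note, not from the original source): for
  //   struct S { int a, b; };  S make();
  //   S s = make();      // Dest is the slot for 's'; the call emits into it.
  //   (void)make();      // Dest is ignored; if S were a non-trivial C struct,
  //                      // a temporary plus lifetime markers would be used so
  //                      // the destructor still has something to run on.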
64 void withReturnValueSlot(const Expr *E,
65 llvm::function_ref<RValue(ReturnValueSlot)> Fn);
66
67public:
68 AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
69 : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
70 IsResultUnused(IsResultUnused) { }
71
72 //===--------------------------------------------------------------------===//
73 // Utilities
74 //===--------------------------------------------------------------------===//
75
76 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
77 /// represents a value lvalue, this method emits the address of the lvalue,
78 /// then loads the result into DestPtr.
79 void EmitAggLoadOfLValue(const Expr *E);
80
81 enum ExprValueKind {
82 EVK_RValue,
83 EVK_NonRValue
84 };
85
86 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
87 /// SrcIsRValue is true if source comes from an RValue.
88 void EmitFinalDestCopy(QualType type, const LValue &src,
89 ExprValueKind SrcValueKind = EVK_NonRValue);
90 void EmitFinalDestCopy(QualType type, RValue src);
91 void EmitCopy(QualType type, const AggValueSlot &dest,
92 const AggValueSlot &src);
93
94 void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
95 Expr *ExprToVisit, ArrayRef<Expr *> Args,
96 Expr *ArrayFiller);
97
98 AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
99 if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
100 return AggValueSlot::NeedsGCBarriers;
101 return AggValueSlot::DoesNotNeedGCBarriers;
102 }
103
104 bool TypeRequiresGCollection(QualType T);
105
106 //===--------------------------------------------------------------------===//
107 // Visitor Methods
108 //===--------------------------------------------------------------------===//
109
110 void Visit(Expr *E) {
111 ApplyDebugLocation DL(CGF, E);
112 StmtVisitor<AggExprEmitter>::Visit(E);
113 }
114
115 void VisitStmt(Stmt *S) {
116 CGF.ErrorUnsupported(S, "aggregate expression");
117 }
118 void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
119 void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
120 Visit(GE->getResultExpr());
121 }
122 void VisitCoawaitExpr(CoawaitExpr *E) {
123 CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
124 }
125 void VisitCoyieldExpr(CoyieldExpr *E) {
126 CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
127 }
128 void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
129 void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
130 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
131 return Visit(E->getReplacement());
132 }
133
134 void VisitConstantExpr(ConstantExpr *E) {
135 EnsureDest(E->getType());
136
137 if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
138 Address StoreDest = Dest.getAddress();
139 // The emitted value is guaranteed to have the same size as the
140 // destination but can have a different type. Just do a bitcast in this
141 // case to avoid incorrect GEPs.
142 if (Result->getType() != StoreDest.getType())
143 StoreDest = StoreDest.withElementType(Result->getType());
144
145 CGF.EmitAggregateStore(Result, StoreDest,
146 E->getType().isVolatileQualified());
147 return;
148 }
149 return Visit(E->getSubExpr());
150 }
151
152 // l-values.
153 void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
154 void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
155 void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
156 void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
157 void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
158 void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
159 EmitAggLoadOfLValue(E);
160 }
161 void VisitPredefinedExpr(const PredefinedExpr *E) {
162 EmitAggLoadOfLValue(E);
163 }
164
165 // Operators.
166 void VisitCastExpr(CastExpr *E);
167 void VisitCallExpr(const CallExpr *E);
168 void VisitStmtExpr(const StmtExpr *E);
169 void VisitBinaryOperator(const BinaryOperator *BO);
170 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
171 void VisitBinAssign(const BinaryOperator *E);
172 void VisitBinComma(const BinaryOperator *E);
173 void VisitBinCmp(const BinaryOperator *E);
174 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
175 Visit(E->getSemanticForm());
176 }
177
178 void VisitObjCMessageExpr(ObjCMessageExpr *E);
179 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
180 EmitAggLoadOfLValue(E);
181 }
182
183 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
184 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
185 void VisitChooseExpr(const ChooseExpr *CE);
186 void VisitInitListExpr(InitListExpr *E);
187 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
188 FieldDecl *InitializedFieldInUnion,
189 Expr *ArrayFiller);
190 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
191 llvm::Value *outerBegin = nullptr);
192 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
193 void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
194 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
195 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
196 Visit(DAE->getExpr());
197 }
198 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
199 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
200 Visit(DIE->getExpr());
201 }
202 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
203 void VisitCXXConstructExpr(const CXXConstructExpr *E);
204 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
205 void VisitLambdaExpr(LambdaExpr *E);
206 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
207 void VisitExprWithCleanups(ExprWithCleanups *E);
208 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
209 void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
210 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
211 void VisitOpaqueValueExpr(OpaqueValueExpr *E);
212
213 void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
214 if (E->isGLValue()) {
215 LValue LV = CGF.EmitPseudoObjectLValue(E);
216 return EmitFinalDestCopy(E->getType(), LV);
217 }
218
219 AggValueSlot Slot = EnsureSlot(E->getType());
220 bool NeedsDestruction =
221 !Slot.isExternallyDestructed() &&
222 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
223 if (NeedsDestruction)
224 Slot.setExternallyDestructed();
225 CGF.EmitPseudoObjectRValue(E, Slot);
226 if (NeedsDestruction)
227 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
228 E->getType());
229 }
230
231 void VisitVAArgExpr(VAArgExpr *E);
232 void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
233 void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
234 Expr *ArrayFiller);
235
236 void EmitInitializationToLValue(Expr *E, LValue Address);
237 void EmitNullInitializationToLValue(LValue Address);
238 // case Expr::ChooseExprClass:
239 void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
240 void VisitAtomicExpr(AtomicExpr *E) {
241 RValue Res = CGF.EmitAtomicExpr(E);
242 EmitFinalDestCopy(E->getType(), Res);
243 }
244 void VisitPackIndexingExpr(PackIndexingExpr *E) {
245 Visit(E->getSelectedExpr());
246 }
247};
248} // end anonymous namespace.
249
250//===----------------------------------------------------------------------===//
251// Utilities
252//===----------------------------------------------------------------------===//
253
254/// EmitAggLoadOfLValue - Given an expression with aggregate type that
255/// represents a value lvalue, this method emits the address of the lvalue,
256/// then loads the result into DestPtr.
257void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
258 LValue LV = CGF.EmitLValue(E);
259
260 // If the type of the l-value is atomic, then do an atomic load.
261 if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
262 CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
263 return;
264 }
265
266 EmitFinalDestCopy(E->getType(), LV);
267}
268
269/// True if the given aggregate type requires special GC API calls.
270bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
271 // Only record types have members that might require garbage collection.
272 const RecordType *RecordTy = T->getAs<RecordType>();
273 if (!RecordTy) return false;
274
275 // Don't mess with non-trivial C++ types.
276 RecordDecl *Record = RecordTy->getDecl();
277 if (isa<CXXRecordDecl>(Record) &&
278 (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
279 !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
280 return false;
281
282 // Check whether the type has an object member.
283 return Record->hasObjectMember();
284}
285
286void AggExprEmitter::withReturnValueSlot(
287 const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
288 QualType RetTy = E->getType();
289 bool RequiresDestruction =
290 !Dest.isExternallyDestructed() &&
291 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
292
293 // If it makes no observable difference, save a memcpy + temporary.
294 //
295 // We need to always provide our own temporary if destruction is required.
296 // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
297 // its lifetime before we have the chance to emit a proper destructor call.
298 bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
299 (RequiresDestruction && Dest.isIgnored());
300
301 Address RetAddr = Address::invalid();
302 RawAddress RetAllocaAddr = RawAddress::invalid();
303
304 EHScopeStack::stable_iterator LifetimeEndBlock;
305 llvm::Value *LifetimeSizePtr = nullptr;
306 llvm::IntrinsicInst *LifetimeStartInst = nullptr;
307 if (!UseTemp) {
308 RetAddr = Dest.getAddress();
309 } else {
310 RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
311 llvm::TypeSize Size =
312 CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
313 LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
314 if (LifetimeSizePtr) {
315 LifetimeStartInst =
316 cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
317 assert(LifetimeStartInst->getIntrinsicID() ==
318 llvm::Intrinsic::lifetime_start &&
319 "Last insertion wasn't a lifetime.start?");
320
321 CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
322 NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
323 LifetimeEndBlock = CGF.EHStack.stable_begin();
324 }
325 }
326
327 RValue Src =
328 EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
329 Dest.isExternallyDestructed()));
330
331 if (!UseTemp)
332 return;
333
334 assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
335 Src.getAggregatePointer(E->getType(), CGF));
336 EmitFinalDestCopy(E->getType(), Src);
337
338 if (!RequiresDestruction && LifetimeStartInst) {
339 // If there's no dtor to run, the copy was the last use of our temporary.
340 // Since we're not guaranteed to be in an ExprWithCleanups, clean up
341 // eagerly.
342 CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
343 CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
344 }
345}
346
347/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
348void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
349 assert(src.isAggregate() && "value must be aggregate value!");
350 LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
351 EmitFinalDestCopy(type, srcLV, EVK_RValue);
352}
353
354/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
355void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
356 ExprValueKind SrcValueKind) {
357 // If Dest is ignored, then we're evaluating an aggregate expression
358 // in a context that doesn't care about the result. Note that loads
359 // from volatile l-values force the existence of a non-ignored
360 // destination.
361 if (Dest.isIgnored())
362 return;
363
364 // Copy non-trivial C structs here.
365 LValue DstLV = CGF.MakeAddrLValue(
366 Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
367
368 if (SrcValueKind == EVK_RValue) {
369 if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
370 if (Dest.isPotentiallyAliased())
371 CGF.callCStructMoveAssignmentOperator(DstLV, src);
372 else
373 CGF.callCStructMoveConstructor(DstLV, src);
374 return;
375 }
376 } else {
377 if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
378 if (Dest.isPotentiallyAliased())
379 CGF.callCStructCopyAssignmentOperator(DstLV, src);
380 else
381 CGF.callCStructCopyConstructor(DstLV, src);
382 return;
383 }
384 }
385
386 AggValueSlot srcAgg = AggValueSlot::forLValue(
387 src, CGF, AggValueSlot::IsDestructed, needsGC(type),
388 AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
389 EmitCopy(type, Dest, srcAgg);
390}
391
392/// Perform a copy from the source into the destination.
393///
394/// \param type - the type of the aggregate being copied; qualifiers are
395/// ignored
396void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
397 const AggValueSlot &src) {
398 if (dest.requiresGCollection()) {
399 CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
400 llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
401 CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
402 dest.getAddress(),
403 src.getAddress(),
404 size);
405 return;
406 }
407
408 // If the result of the assignment is used, copy the LHS there also.
409 // It's volatile if either side is. Use the minimum alignment of
410 // the two sides.
411 LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
412 LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
413 CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
414 dest.isVolatile() || src.isVolatile());
415}
416
417/// Emit the initializer for a std::initializer_list initialized with a
418/// real initializer list.
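/// Illustrative example (added note): for
///   std::initializer_list<int> il = {1, 2, 3};
/// the backing array {1, 2, 3} is emitted first, and the function below then
/// fills in either a {start, end} pointer pair or a {start, length} pair,
/// depending on how the library declares std::initializer_list's fields.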
419void
420AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
421 // Emit an array containing the elements. The array is externally destructed
422 // if the std::initializer_list object is.
423 ASTContext &Ctx = CGF.getContext();
424 LValue Array = CGF.EmitLValue(E->getSubExpr());
425 assert(Array.isSimple() && "initializer_list array not a simple lvalue");
426 Address ArrayPtr = Array.getAddress(CGF);
427
428 const ConstantArrayType *ArrayType =
429 Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
430 assert(ArrayType && "std::initializer_list constructed from non-array");
431
432 // FIXME: Perform the checks on the field types in SemaInit.
433 RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
434 RecordDecl::field_iterator Field = Record->field_begin();
435 if (Field == Record->field_end()) {
436 CGF.ErrorUnsupported(E, "weird std::initializer_list");
437 return;
438 }
439
440 // Start pointer.
441 if (!Field->getType()->isPointerType() ||
442 !Ctx.hasSameType(Field->getType()->getPointeeType(),
443 ArrayType->getElementType())) {
444 CGF.ErrorUnsupported(E, "weird std::initializer_list");
445 return;
446 }
447
448 AggValueSlot Dest = EnsureSlot(E->getType());
449 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
450 LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
451 llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
452 llvm::Value *IdxStart[] = { Zero, Zero };
453 llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
454 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxStart,
455 "arraystart");
456 CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
457 ++Field;
458
459 if (Field == Record->field_end()) {
460 CGF.ErrorUnsupported(E, "weird std::initializer_list");
461 return;
462 }
463
464 llvm::Value *Size = Builder.getInt(ArrayType->getSize());
465 LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
466 if (Field->getType()->isPointerType() &&
467 Ctx.hasSameType(Field->getType()->getPointeeType(),
468 ArrayType->getElementType())) {
469 // End pointer.
470 llvm::Value *IdxEnd[] = { Zero, Size };
471 llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
472 ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
473 "arrayend");
474 CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
475 } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
476 // Length.
477 CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
478 } else {
479 CGF.ErrorUnsupported(E, "weird std::initializer_list");
480 return;
481 }
482}
483
484/// Determine if E is a trivial array filler, that is, one that is
485/// equivalent to zero-initialization.
486static bool isTrivialFiller(Expr *E) {
487 if (!E)
488 return true;
489
490 if (isa<ImplicitValueInitExpr>(E))
491 return true;
492
493 if (auto *ILE = dyn_cast<InitListExpr>(E)) {
494 if (ILE->getNumInits())
495 return false;
496 return isTrivialFiller(ILE->getArrayFiller());
497 }
498
499 if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
500 return Cons->getConstructor()->isDefaultConstructor() &&
501 Cons->getConstructor()->isTrivial();
502
503 // FIXME: Are there other cases where we can avoid emitting an initializer?
504 return false;
505}
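// Illustrative note (added): for `int a[8] = {1, 2};` the filler for the six
// remaining elements is an ImplicitValueInitExpr, which is trivial, so
// EmitArrayInit can skip the fill loop when the destination is already zeroed.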
506
507/// Emit initialization of an array from an initializer list. ExprToVisit must
508/// be either an InitListExpr or a CXXParenListInitExpr.
509void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
510 QualType ArrayQTy, Expr *ExprToVisit,
511 ArrayRef<Expr *> Args, Expr *ArrayFiller) {
512 uint64_t NumInitElements = Args.size();
513
514 uint64_t NumArrayElements = AType->getNumElements();
515 assert(NumInitElements <= NumArrayElements);
516
517 QualType elementType =
518 CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
519
520 // DestPtr is an array*. Construct an elementType* by drilling
521 // down a level.
522 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
523 llvm::Value *indices[] = { zero, zero };
524 llvm::Value *begin = Builder.CreateInBoundsGEP(DestPtr.getElementType(),
525 DestPtr.emitRawPointer(CGF),
526 indices, "arrayinit.begin");
527
528 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
529 CharUnits elementAlign =
530 DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
531 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
532
533 // Consider initializing the array by copying from a global. For this to be
534 // more efficient than per-element initialization, the size of the elements
535 // with explicit initializers should be large enough.
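  // Illustrative example (added note): an initializer such as
  //   int a[64] = {1, 2, 3, 4, 5, 6};
  // can be lowered to a private constant global plus a copy, provided the
  // initializer is constant-foldable and the elements are trivially copyable.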
536 if (NumInitElements * elementSize.getQuantity() > 16 &&
537 elementType.isTriviallyCopyableType(CGF.getContext())) {
538 CodeGen::CodeGenModule &CGM = CGF.CGM;
539 ConstantEmitter Emitter(CGF);
540 QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
541 CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
542 CGM.GetGlobalConstantAddressSpace());
543 LangAS AS = GVArrayQTy.getAddressSpace();
544 if (llvm::Constant *C =
545 Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
546 auto GV = new llvm::GlobalVariable(
547 CGM.getModule(), C->getType(),
548 /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
549 "constinit",
550 /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
551 CGM.getContext().getTargetAddressSpace(AS));
552 Emitter.finalize(GV);
553 CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
554 GV->setAlignment(Align.getAsAlign());
555 Address GVAddr(GV, GV->getValueType(), Align);
556 EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
557 return;
558 }
559 }
560
561 // Exception safety requires us to destroy all the
562 // already-constructed members if an initializer throws.
563 // For that, we'll need an EH cleanup.
564 QualType::DestructionKind dtorKind = elementType.isDestructedType();
565 Address endOfInit = Address::invalid();
566 CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
567
568 if (dtorKind) {
569 CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
570 // In principle we could tell the cleanup where we are more
571 // directly, but the control flow can get so varied here that it
572 // would actually be quite complex. Therefore we go through an
573 // alloca.
574 llvm::Instruction *dominatingIP =
575 Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
576 endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
577 "arrayinit.endOfInit");
578 Builder.CreateStore(begin, endOfInit);
579 CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
580 elementAlign,
581 CGF.getDestroyer(dtorKind));
582 cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
583 .AddAuxAllocas(allocaTracker.Take());
584
585 CGF.DeferredDeactivationCleanupStack.push_back(
586 {CGF.EHStack.stable_begin(), dominatingIP});
587 }
588
589 llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
590
591 // The 'current element to initialize'. The invariants on this
592 // variable are complicated. Essentially, after each iteration of
593 // the loop, it points to the last initialized element, except
594 // that it points to the beginning of the array before any
595 // elements have been initialized.
596 llvm::Value *element = begin;
597
598 // Emit the explicit initializers.
599 for (uint64_t i = 0; i != NumInitElements; ++i) {
600 // Advance to the next element.
601 if (i > 0) {
602 element = Builder.CreateInBoundsGEP(
603 llvmElementType, element, one, "arrayinit.element");
604
605 // Tell the cleanup that it needs to destroy up to this
606 // element. TODO: some of these stores can be trivially
607 // observed to be unnecessary.
608 if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
609 }
610
611 LValue elementLV = CGF.MakeAddrLValue(
612 Address(element, llvmElementType, elementAlign), elementType);
613 EmitInitializationToLValue(Args[i], elementLV);
614 }
615
616 // Check whether there's a non-trivial array-fill expression.
617 bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
618
619 // Any remaining elements need to be zero-initialized, possibly
620 // using the filler expression. We can skip this if we're
621 // emitting to zeroed memory.
622 if (NumInitElements != NumArrayElements &&
623 !(Dest.isZeroed() && hasTrivialFiller &&
624 CGF.getTypes().isZeroInitializable(elementType))) {
625
626 // Use an actual loop. This is basically
627 // do { *array++ = filler; } while (array != end);
628
629 // Advance to the start of the rest of the array.
630 if (NumInitElements) {
631 element = Builder.CreateInBoundsGEP(
632 llvmElementType, element, one, "arrayinit.start");
633 if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
634 }
635
636 // Compute the end of the array.
637 llvm::Value *end = Builder.CreateInBoundsGEP(
638 llvmElementType, begin,
639 llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
640
641 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
642 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
643
644 // Jump into the body.
645 CGF.EmitBlock(bodyBB);
646 llvm::PHINode *currentElement =
647 Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
648 currentElement->addIncoming(element, entryBB);
649
650 // Emit the actual filler expression.
651 {
652 // C++1z [class.temporary]p5:
653 // when a default constructor is called to initialize an element of
654 // an array with no corresponding initializer [...] the destruction of
655 // every temporary created in a default argument is sequenced before
656 // the construction of the next array element, if any
657 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
658 LValue elementLV = CGF.MakeAddrLValue(
659 Address(currentElement, llvmElementType, elementAlign), elementType);
660 if (ArrayFiller)
661 EmitInitializationToLValue(ArrayFiller, elementLV);
662 else
663 EmitNullInitializationToLValue(elementLV);
664 }
665
666 // Move on to the next element.
667 llvm::Value *nextElement = Builder.CreateInBoundsGEP(
668 llvmElementType, currentElement, one, "arrayinit.next");
669
670 // Tell the EH cleanup that we finished with the last element.
671 if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
672
673 // Leave the loop if we're done.
674 llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
675 "arrayinit.done");
676 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
677 Builder.CreateCondBr(done, endBB, bodyBB);
678 currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
679
680 CGF.EmitBlock(endBB);
681 }
682}
683
684//===----------------------------------------------------------------------===//
685// Visitor Methods
686//===----------------------------------------------------------------------===//
687
688void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
689 Visit(E->getSubExpr());
690}
691
692void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
693 // If this is a unique OVE, just visit its source expression.
694 if (e->isUnique())
695 Visit(e->getSourceExpr());
696 else
697 EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
698}
699
700void
701AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
702 if (Dest.isPotentiallyAliased() &&
703 E->getType().isPODType(CGF.getContext())) {
704 // For a POD type, just emit a load of the lvalue + a copy, because our
705 // compound literal might alias the destination.
706 EmitAggLoadOfLValue(E);
707 return;
708 }
709
710 AggValueSlot Slot = EnsureSlot(E->getType());
711
712 // Block-scope compound literals are destroyed at the end of the enclosing
713 // scope in C.
714 bool Destruct =
715 !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
716 if (Destruct)
717 Slot.setExternallyDestructed();
718
719 CGF.EmitAggExpr(E->getInitializer(), Slot);
720
721 if (Destruct)
722 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
723 CGF.pushLifetimeExtendedDestroy(
724 CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
725 CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
726}
727
728/// Attempt to look through various unimportant expressions to find a
729/// cast of the given kind.
730static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
731 op = op->IgnoreParenNoopCasts(ctx);
732 if (auto castE = dyn_cast<CastExpr>(op)) {
733 if (castE->getCastKind() == kind)
734 return castE->getSubExpr();
735 }
736 return nullptr;
737}
738
739void AggExprEmitter::VisitCastExpr(CastExpr *E) {
740 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
741 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
742 switch (E->getCastKind()) {
743 case CK_Dynamic: {
744 // FIXME: Can this actually happen? We have no test coverage for it.
745 assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
746 LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
747 CodeGenFunction::TCK_Load);
748 // FIXME: Do we also need to handle property references here?
749 if (LV.isSimple())
750 CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
751 else
752 CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
753
754 if (!Dest.isIgnored())
755 CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
756 break;
757 }
758
759 case CK_ToUnion: {
760 // Evaluate even if the destination is ignored.
761 if (Dest.isIgnored()) {
762 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
763 /*ignoreResult=*/true);
764 break;
765 }
766
767 // GCC union extension
768 QualType Ty = E->getSubExpr()->getType();
769 Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
770 EmitInitializationToLValue(E->getSubExpr(),
771 CGF.MakeAddrLValue(CastPtr, Ty));
772 break;
773 }
774
775 case CK_LValueToRValueBitCast: {
776 if (Dest.isIgnored()) {
777 CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
778 /*ignoreResult=*/true);
779 break;
780 }
781
782 LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
783 Address SourceAddress =
784 SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
785 Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
786 llvm::Value *SizeVal = llvm::ConstantInt::get(
787 CGF.SizeTy,
788 CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
789 Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
790 break;
791 }
792
793 case CK_DerivedToBase:
794 case CK_BaseToDerived:
795 case CK_UncheckedDerivedToBase: {
796 llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
797 "should have been unpacked before we got here");
798 }
799
800 case CK_NonAtomicToAtomic:
801 case CK_AtomicToNonAtomic: {
802 bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
803
804 // Determine the atomic and value types.
805 QualType atomicType = E->getSubExpr()->getType();
806 QualType valueType = E->getType();
807 if (isToAtomic) std::swap(atomicType, valueType);
808
809 assert(atomicType->isAtomicType());
810 assert(CGF.getContext().hasSameUnqualifiedType(valueType,
811 atomicType->castAs<AtomicType>()->getValueType()));
812
813 // Just recurse normally if we're ignoring the result or the
814 // atomic type doesn't change representation.
815 if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
816 return Visit(E->getSubExpr());
817 }
818
819 CastKind peepholeTarget =
820 (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
821
822 // These two cases are reverses of each other; try to peephole them.
823 if (Expr *op =
824 findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
825 assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
826 E->getType()) &&
827 "peephole significantly changed types?");
828 return Visit(op);
829 }
830
831 // If we're converting an r-value of non-atomic type to an r-value
832 // of atomic type, just emit directly into the relevant sub-object.
833 if (isToAtomic) {
834 AggValueSlot valueDest = Dest;
835 if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
836 // Zero-initialize. (Strictly speaking, we only need to initialize
837 // the padding at the end, but this is simpler.)
838 if (!Dest.isZeroed())
839 CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
840
841 // Build a GEP to refer to the subobject.
842 Address valueAddr =
843 CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
844 valueDest = AggValueSlot::forAddr(valueAddr,
845 valueDest.getQualifiers(),
846 valueDest.isExternallyDestructed(),
847 valueDest.requiresGCollection(),
848 valueDest.isPotentiallyAliased(),
849 AggValueSlot::DoesNotOverlap,
850 AggValueSlot::IsZeroed);
851 }
852
853 CGF.EmitAggExpr(E->getSubExpr(), valueDest);
854 return;
855 }
856
857 // Otherwise, we're converting an atomic type to a non-atomic type.
858 // Make an atomic temporary, emit into that, and then copy the value out.
859 AggValueSlot atomicSlot =
860 CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
861 CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
862
863 Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
864 RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
865 return EmitFinalDestCopy(valueType, rvalue);
866 }
867 case CK_AddressSpaceConversion:
868 return Visit(E->getSubExpr());
869
870 case CK_LValueToRValue:
871 // If we're loading from a volatile type, force the destination
872 // into existence.
873 if (E->getSubExpr()->getType().isVolatileQualified()) {
874 bool Destruct =
875 !Dest.isExternallyDestructed() &&
876 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
877 if (Destruct)
878 Dest.setExternallyDestructed();
879 EnsureDest(E->getType());
880 Visit(E->getSubExpr());
881
882 if (Destruct)
883 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
884 E->getType());
885
886 return;
887 }
888
889 [[fallthrough]];
890
891 case CK_HLSLArrayRValue:
892 Visit(E->getSubExpr());
893 break;
894
895 case CK_NoOp:
896 case CK_UserDefinedConversion:
897 case CK_ConstructorConversion:
898 assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
899 E->getType()) &&
900 "Implicit cast types must be compatible");
901 Visit(E->getSubExpr());
902 break;
903
904 case CK_LValueBitCast:
905 llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
906
907 case CK_Dependent:
908 case CK_BitCast:
909 case CK_ArrayToPointerDecay:
910 case CK_FunctionToPointerDecay:
911 case CK_NullToPointer:
912 case CK_NullToMemberPointer:
913 case CK_BaseToDerivedMemberPointer:
914 case CK_DerivedToBaseMemberPointer:
915 case CK_MemberPointerToBoolean:
916 case CK_ReinterpretMemberPointer:
917 case CK_IntegralToPointer:
918 case CK_PointerToIntegral:
919 case CK_PointerToBoolean:
920 case CK_ToVoid:
921 case CK_VectorSplat:
922 case CK_IntegralCast:
923 case CK_BooleanToSignedIntegral:
924 case CK_IntegralToBoolean:
925 case CK_IntegralToFloating:
926 case CK_FloatingToIntegral:
927 case CK_FloatingToBoolean:
928 case CK_FloatingCast:
929 case CK_CPointerToObjCPointerCast:
930 case CK_BlockPointerToObjCPointerCast:
931 case CK_AnyPointerToBlockPointerCast:
932 case CK_ObjCObjectLValueCast:
933 case CK_FloatingRealToComplex:
934 case CK_FloatingComplexToReal:
935 case CK_FloatingComplexToBoolean:
936 case CK_FloatingComplexCast:
937 case CK_FloatingComplexToIntegralComplex:
938 case CK_IntegralRealToComplex:
939 case CK_IntegralComplexToReal:
940 case CK_IntegralComplexToBoolean:
941 case CK_IntegralComplexCast:
942 case CK_IntegralComplexToFloatingComplex:
943 case CK_ARCProduceObject:
944 case CK_ARCConsumeObject:
945 case CK_ARCReclaimReturnedObject:
946 case CK_ARCExtendBlockObject:
947 case CK_CopyAndAutoreleaseBlockObject:
948 case CK_BuiltinFnToFnPtr:
949 case CK_ZeroToOCLOpaqueType:
950 case CK_MatrixCast:
951 case CK_HLSLVectorTruncation:
952
953 case CK_IntToOCLSampler:
954 case CK_FloatingToFixedPoint:
955 case CK_FixedPointToFloating:
956 case CK_FixedPointCast:
957 case CK_FixedPointToBoolean:
958 case CK_FixedPointToIntegral:
959 case CK_IntegralToFixedPoint:
960 llvm_unreachable("cast kind invalid for aggregate types");
961 }
962}
963
964void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
965 if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
966 EmitAggLoadOfLValue(E);
967 return;
968 }
969
970 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
971 return CGF.EmitCallExpr(E, Slot);
972 });
973}
974
975void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
976 withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
977 return CGF.EmitObjCMessageExpr(E, Slot);
978 });
979}
980
981void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
982 CGF.EmitIgnoredExpr(E->getLHS());
983 Visit(E->getRHS());
984}
985
986void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
987 CodeGenFunction::StmtExprEvaluation eval(CGF);
988 CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
989}
990
991enum CompareKind {
992 CK_Less,
993 CK_Greater,
994 CK_Equal
995};
996
997static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
998 const BinaryOperator *E, llvm::Value *LHS,
999 llvm::Value *RHS, CompareKind Kind,
1000 const char *NameSuffix = "") {
1001 QualType ArgTy = E->getLHS()->getType();
1002 if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
1003 ArgTy = CT->getElementType();
1004
1005 if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
1006 assert(Kind == CK_Equal &&
1007 "member pointers may only be compared for equality");
1009 CGF, LHS, RHS, MPT, /*IsInequality*/ false);
1010 }
1011
1012 // Compute the comparison instructions for the specified comparison kind.
1013 struct CmpInstInfo {
1014 const char *Name;
1015 llvm::CmpInst::Predicate FCmp;
1016 llvm::CmpInst::Predicate SCmp;
1017 llvm::CmpInst::Predicate UCmp;
1018 };
1019 CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
1020 using FI = llvm::FCmpInst;
1021 using II = llvm::ICmpInst;
1022 switch (Kind) {
1023 case CK_Less:
1024 return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
1025 case CK_Greater:
1026 return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
1027 case CK_Equal:
1028 return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
1029 }
1030 llvm_unreachable("Unrecognised CompareKind enum");
1031 }();
1032
1033 if (ArgTy->hasFloatingRepresentation())
1034 return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
1035 llvm::Twine(InstInfo.Name) + NameSuffix);
1036 if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
1037 auto Inst =
1038 ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
1039 return Builder.CreateICmp(Inst, LHS, RHS,
1040 llvm::Twine(InstInfo.Name) + NameSuffix);
1041 }
1042
1043 llvm_unreachable("unsupported aggregate binary expression should have "
1044 "already been handled");
1045}
1046
1047void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
1048 using llvm::BasicBlock;
1049 using llvm::PHINode;
1050 using llvm::Value;
1051 assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
1052 E->getRHS()->getType()));
1053 const ComparisonCategoryInfo &CmpInfo =
1054 CGF.getContext().CompCategories.getInfoForType(E->getType());
1055 assert(CmpInfo.Record->isTriviallyCopyable() &&
1056 "cannot copy non-trivially copyable aggregate");
1057
1058 QualType ArgTy = E->getLHS()->getType();
1059
1060 if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1061 !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1062 !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1063 return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
1064 }
1065 bool IsComplex = ArgTy->isAnyComplexType();
1066
1067 // Evaluate the operands to the expression and extract their values.
1068 auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
1069 RValue RV = CGF.EmitAnyExpr(E);
1070 if (RV.isScalar())
1071 return {RV.getScalarVal(), nullptr};
1072 if (RV.isAggregate())
1073 return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
1074 assert(RV.isComplex());
1075 return RV.getComplexVal();
1076 };
1077 auto LHSValues = EmitOperand(E->getLHS()),
1078 RHSValues = EmitOperand(E->getRHS());
1079
1080 auto EmitCmp = [&](CompareKind K) {
1081 Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
1082 K, IsComplex ? ".r" : "");
1083 if (!IsComplex)
1084 return Cmp;
1085 assert(K == CompareKind::CK_Equal);
1086 Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
1087 RHSValues.second, K, ".i");
1088 return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
1089 };
1090 auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
1091 return Builder.getInt(VInfo->getIntValue());
1092 };
1093
1094 Value *Select;
1095 if (ArgTy->isNullPtrType()) {
1096 Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1097 } else if (!CmpInfo.isPartial()) {
1098 Value *SelectOne =
1099 Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1100 EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1101 Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1102 EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1103 SelectOne, "sel.eq");
1104 } else {
1105 Value *SelectEq = Builder.CreateSelect(
1106 EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1107 EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1108 Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1109 EmitCmpRes(CmpInfo.getGreater()),
1110 SelectEq, "sel.gt");
1111 Select = Builder.CreateSelect(
1112 EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1113 }
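  // Added note (not in the original source): for a non-partial ordering the
  // result is effectively
  //   eq ? equivalent : (lt ? less : greater)
  // and for a partial ordering an extra select folds in the 'unordered' value.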
1114 // Create the return value in the destination slot.
1115 EnsureDest(E->getType());
1116 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1117
1118 // Emit the address of the first (and only) field in the comparison category
1119 // type, and initialize it from the constant integer value selected above.
1120 LValue FieldLV = CGF.EmitLValueForFieldInitialization(
1121 DestLV, *CmpInfo.Record->field_begin());
1122 CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1123
1124 // All done! The result is in the Dest slot.
1125}
1126
1127void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1128 if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1129 VisitPointerToDataMemberBinaryOperator(E);
1130 else
1131 CGF.ErrorUnsupported(E, "aggregate binary expression");
1132}
1133
1134void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1135 const BinaryOperator *E) {
1136 LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1137 EmitFinalDestCopy(E->getType(), LV);
1138}
1139
1140/// Is the value of the given expression possibly a reference to or
1141/// into a __block variable?
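/// Illustrative example (added note): in
///   __block S s;
///   s = computeWithEscapingBlock();   // hypothetical call for illustration
/// the RHS may copy a block that captures 's' and move it to the heap, so
/// VisitBinAssign evaluates the RHS before taking the address of the LHS.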
1142static bool isBlockVarRef(const Expr *E) {
1143 // Make sure we look through parens.
1144 E = E->IgnoreParens();
1145
1146 // Check for a direct reference to a __block variable.
1147 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1148 const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1149 return (var && var->hasAttr<BlocksAttr>());
1150 }
1151
1152 // More complicated stuff.
1153
1154 // Binary operators.
1155 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1156 // For an assignment or pointer-to-member operation, just care
1157 // about the LHS.
1158 if (op->isAssignmentOp() || op->isPtrMemOp())
1159 return isBlockVarRef(op->getLHS());
1160
1161 // For a comma, just care about the RHS.
1162 if (op->getOpcode() == BO_Comma)
1163 return isBlockVarRef(op->getRHS());
1164
1165 // FIXME: pointer arithmetic?
1166 return false;
1167
1168 // Check both sides of a conditional operator.
1169 } else if (const AbstractConditionalOperator *op
1170 = dyn_cast<AbstractConditionalOperator>(E)) {
1171 return isBlockVarRef(op->getTrueExpr())
1172 || isBlockVarRef(op->getFalseExpr());
1173
1174 // OVEs are required to support BinaryConditionalOperators.
1175 } else if (const OpaqueValueExpr *op
1176 = dyn_cast<OpaqueValueExpr>(E)) {
1177 if (const Expr *src = op->getSourceExpr())
1178 return isBlockVarRef(src);
1179
1180 // Casts are necessary to get things like (*(int*)&var) = foo().
1181 // We don't really care about the kind of cast here, except
1182 // we don't want to look through l2r casts, because it's okay
1183 // to get the *value* in a __block variable.
1184 } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1185 if (cast->getCastKind() == CK_LValueToRValue)
1186 return false;
1187 return isBlockVarRef(cast->getSubExpr());
1188
1189 // Handle unary operators. Again, just aggressively look through
1190 // it, ignoring the operation.
1191 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1192 return isBlockVarRef(uop->getSubExpr());
1193
1194 // Look into the base of a field access.
1195 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1196 return isBlockVarRef(mem->getBase());
1197
1198 // Look into the base of a subscript.
1199 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1200 return isBlockVarRef(sub->getBase());
1201 }
1202
1203 return false;
1204}
1205
1206void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1207 // For an assignment to work, the value on the right has
1208 // to be compatible with the value on the left.
1209 assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1210 E->getRHS()->getType())
1211 && "Invalid assignment");
1212
1213 // If the LHS might be a __block variable, and the RHS can
1214 // potentially cause a block copy, we need to evaluate the RHS first
1215 // so that the assignment goes the right place.
1216 // This is pretty semantically fragile.
1217 if (isBlockVarRef(E->getLHS()) &&
1218 E->getRHS()->HasSideEffects(CGF.getContext())) {
1219 // Ensure that we have a destination, and evaluate the RHS into that.
1220 EnsureDest(E->getRHS()->getType());
1221 Visit(E->getRHS());
1222
1223 // Now emit the LHS and copy into it.
1224 LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1225
1226 // That copy is an atomic copy if the LHS is atomic.
1227 if (LHS.getType()->isAtomicType() ||
1228 CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1229 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1230 return;
1231 }
1232
1233 EmitCopy(E->getLHS()->getType(),
1234 AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
1235 needsGC(E->getLHS()->getType()),
1236 AggValueSlot::IsAliased,
1237 AggValueSlot::MayOverlap),
1238 Dest);
1239 return;
1240 }
1241
1242 LValue LHS = CGF.EmitLValue(E->getLHS());
1243
1244 // If we have an atomic type, evaluate into the destination and then
1245 // do an atomic copy.
1246 if (LHS.getType()->isAtomicType() ||
1247 CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1248 EnsureDest(E->getRHS()->getType());
1249 Visit(E->getRHS());
1250 CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1251 return;
1252 }
1253
1254 // Codegen the RHS so that it stores directly into the LHS.
1255 AggValueSlot LHSSlot = AggValueSlot::forLValue(
1256 LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1257 AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
1258 // A non-volatile aggregate destination might have volatile member.
1259 if (!LHSSlot.isVolatile() &&
1260 CGF.hasVolatileMember(E->getLHS()->getType()))
1261 LHSSlot.setVolatile(true);
1262
1263 CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1264
1265 // Copy into the destination if the assignment isn't ignored.
1266 EmitFinalDestCopy(E->getType(), LHS);
1267
1268 if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1269 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
1270 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1271 E->getType());
1272}
1273
1274void AggExprEmitter::
1275VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1276 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1277 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1278 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1279
1280 // Bind the common expression if necessary.
1281 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1282
1283 CodeGenFunction::ConditionalEvaluation eval(CGF);
1284 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1285 CGF.getProfileCount(E));
1286
1287 // Save whether the destination's lifetime is externally managed.
1288 bool isExternallyDestructed = Dest.isExternallyDestructed();
1289 bool destructNonTrivialCStruct =
1290 !isExternallyDestructed &&
1291 E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
1292 isExternallyDestructed |= destructNonTrivialCStruct;
1293 Dest.setExternallyDestructed(isExternallyDestructed);
1294
1295 eval.begin(CGF);
1296 CGF.EmitBlock(LHSBlock);
1297 if (llvm::EnableSingleByteCoverage)
1298 CGF.incrementProfileCounter(E->getTrueExpr());
1299 else
1300 CGF.incrementProfileCounter(E);
1301 Visit(E->getTrueExpr());
1302 eval.end(CGF);
1303
1304 assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1305 CGF.Builder.CreateBr(ContBlock);
1306
1307 // If the result of an agg expression is unused, then the emission
1308 // of the LHS might need to create a destination slot. That's fine
1309 // with us, and we can safely emit the RHS into the same slot, but
1310 // we shouldn't claim that it's already being destructed.
1311 Dest.setExternallyDestructed(isExternallyDestructed);
1312
1313 eval.begin(CGF);
1314 CGF.EmitBlock(RHSBlock);
1315 if (llvm::EnableSingleByteCoverage)
1316 CGF.incrementProfileCounter(E->getFalseExpr());
1317 Visit(E->getFalseExpr());
1318 eval.end(CGF);
1319
1320 if (destructNonTrivialCStruct)
1321 CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1322 E->getType());
1323
1324 CGF.EmitBlock(ContBlock);
1325 if (llvm::EnableSingleByteCoverage)
1326 CGF.incrementProfileCounter(E);
1327}
1328
1329void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1330 Visit(CE->getChosenSubExpr());
1331}
1332
1333void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1334 Address ArgValue = Address::invalid();
1335 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
1336
1337 // If EmitVAArg fails, emit an error.
1338 if (!ArgPtr.isValid()) {
1339 CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1340 return;
1341 }
1342
1343 EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
1344}
1345
1346void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1347 // Ensure that we have a slot, but if we already do, remember
1348 // whether it was externally destructed.
1349 bool wasExternallyDestructed = Dest.isExternallyDestructed();
1350 EnsureDest(E->getType());
1351
1352 // We're going to push a destructor if there isn't already one.
1353 Dest.setExternallyDestructed();
1354
1355 Visit(E->getSubExpr());
1356
1357 // Push that destructor we promised.
1358 if (!wasExternallyDestructed)
1359 CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1360}
1361
1362void
1363AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1364 AggValueSlot Slot = EnsureSlot(E->getType());
1365 CGF.EmitCXXConstructExpr(E, Slot);
1366}
1367
1368void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1369 const CXXInheritedCtorInitExpr *E) {
1370 AggValueSlot Slot = EnsureSlot(E->getType());
1371 CGF.EmitInheritedCXXConstructorCall(
1372 E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1373 E->inheritedFromVBase(), E);
1374}
1375
1376void
1377AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1378 AggValueSlot Slot = EnsureSlot(E->getType());
1379 LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1380
1381 // We'll need to enter cleanup scopes in case any of the element
1382 // initializers throws an exception or contains a branch out of the expressions.
1383 CodeGenFunction::CleanupDeactivationScope scope(CGF);
1384
1385 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1386 for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1387 e = E->capture_init_end();
1388 i != e; ++i, ++CurField) {
1389 // Emit initialization
1390 LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1391 if (CurField->hasCapturedVLAType()) {
1392 CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1393 continue;
1394 }
1395
1396 EmitInitializationToLValue(*i, LV);
1397
1398 // Push a destructor if necessary.
1399 if (QualType::DestructionKind DtorKind =
1400 CurField->getType().isDestructedType()) {
1401 assert(LV.isSimple());
1402 if (DtorKind)
1403 CGF.pushDestroyAndDeferDeactivation(
1404 NormalAndEHCleanup, LV.getAddress(CGF), CurField->getType(),
1405 CGF.getDestroyer(DtorKind), false);
1406 }
1407 }
1408}
1409
1410void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1411 CodeGenFunction::RunCleanupsScope cleanups(CGF);
1412 Visit(E->getSubExpr());
1413}
1414
1415void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1416 QualType T = E->getType();
1417 AggValueSlot Slot = EnsureSlot(T);
1418 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1419}
1420
1421void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1422 QualType T = E->getType();
1423 AggValueSlot Slot = EnsureSlot(T);
1424 EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1425}
1426
1427/// Determine whether the given cast kind is known to always convert values
1428/// with all zero bits in their value representation to values with all zero
1429/// bits in their value representation.
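/// Added note: e.g. `(float)0` or `(long)0` keeps an all-zero representation,
/// while member-pointer casts may not, because the null member pointer
/// representation is ABI-dependent (Itanium uses -1 for data member pointers).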
1430static bool castPreservesZero(const CastExpr *CE) {
1431 switch (CE->getCastKind()) {
1432 // No-ops.
1433 case CK_NoOp:
1434 case CK_UserDefinedConversion:
1435 case CK_ConstructorConversion:
1436 case CK_BitCast:
1437 case CK_ToUnion:
1438 case CK_ToVoid:
1439 // Conversions between (possibly-complex) integral, (possibly-complex)
1440 // floating-point, and bool.
1441 case CK_BooleanToSignedIntegral:
1442 case CK_FloatingCast:
1443 case CK_FloatingComplexCast:
1444 case CK_FloatingComplexToBoolean:
1445 case CK_FloatingComplexToIntegralComplex:
1446 case CK_FloatingComplexToReal:
1447 case CK_FloatingRealToComplex:
1448 case CK_FloatingToBoolean:
1449 case CK_FloatingToIntegral:
1450 case CK_IntegralCast:
1451 case CK_IntegralComplexCast:
1452 case CK_IntegralComplexToBoolean:
1453 case CK_IntegralComplexToFloatingComplex:
1454 case CK_IntegralComplexToReal:
1455 case CK_IntegralRealToComplex:
1456 case CK_IntegralToBoolean:
1457 case CK_IntegralToFloating:
1458 // Reinterpreting integers as pointers and vice versa.
1459 case CK_IntegralToPointer:
1460 case CK_PointerToIntegral:
1461 // Language extensions.
1462 case CK_VectorSplat:
1463 case CK_MatrixCast:
1464 case CK_NonAtomicToAtomic:
1465 case CK_AtomicToNonAtomic:
1466 case CK_HLSLVectorTruncation:
1467 return true;
1468
1469 case CK_BaseToDerivedMemberPointer:
1470 case CK_DerivedToBaseMemberPointer:
1471 case CK_MemberPointerToBoolean:
1472 case CK_NullToMemberPointer:
1473 case CK_ReinterpretMemberPointer:
1474 // FIXME: ABI-dependent.
1475 return false;
1476
1477 case CK_AnyPointerToBlockPointerCast:
1478 case CK_BlockPointerToObjCPointerCast:
1479 case CK_CPointerToObjCPointerCast:
1480 case CK_ObjCObjectLValueCast:
1481 case CK_IntToOCLSampler:
1482 case CK_ZeroToOCLOpaqueType:
1483 // FIXME: Check these.
1484 return false;
1485
1486 case CK_FixedPointCast:
1487 case CK_FixedPointToBoolean:
1488 case CK_FixedPointToFloating:
1489 case CK_FixedPointToIntegral:
1490 case CK_FloatingToFixedPoint:
1491 case CK_IntegralToFixedPoint:
1492 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1493 return false;
1494
1495 case CK_AddressSpaceConversion:
1496 case CK_BaseToDerived:
1497 case CK_DerivedToBase:
1498 case CK_Dynamic:
1499 case CK_NullToPointer:
1500 case CK_PointerToBoolean:
1501 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1502 // same representation in all involved address spaces.
1503 return false;
1504
1505 case CK_ARCConsumeObject:
1506 case CK_ARCExtendBlockObject:
1507 case CK_ARCProduceObject:
1508 case CK_ARCReclaimReturnedObject:
1509 case CK_CopyAndAutoreleaseBlockObject:
1510 case CK_ArrayToPointerDecay:
1511 case CK_FunctionToPointerDecay:
1512 case CK_BuiltinFnToFnPtr:
1513 case CK_Dependent:
1514 case CK_LValueBitCast:
1515 case CK_LValueToRValue:
1516 case CK_LValueToRValueBitCast:
1517 case CK_UncheckedDerivedToBase:
1518 case CK_HLSLArrayRValue:
1519 return false;
1520 }
1521 llvm_unreachable("Unhandled clang::CastKind enum");
1522}
1523
1524/// isSimpleZero - If emitting this value will obviously just cause a store of
1525/// zero to memory, return true. This can return false if uncertain, so it just
1526/// handles simple cases.
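/// Illustrative examples (added note): `0`, `+0.0`, `'\0'`, `int()` and a null
/// pointer constant like `(int *)0` are all recognized, so storing them into
/// memory that is already zeroed can be skipped.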
1527static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1528 E = E->IgnoreParens();
1529 while (auto *CE = dyn_cast<CastExpr>(E)) {
1530 if (!castPreservesZero(CE))
1531 break;
1532 E = CE->getSubExpr()->IgnoreParens();
1533 }
1534
1535 // 0
1536 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1537 return IL->getValue() == 0;
1538 // +0.0
1539 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1540 return FL->getValue().isPosZero();
1541 // int()
1542 if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1543 CGF.getTypes().isZeroInitializable(E->getType()))
1544 return true;
1545 // (int*)0 - Null pointer expressions.
1546 if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1547 return ICE->getCastKind() == CK_NullToPointer &&
1548 CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
1549 !E->HasSideEffects(CGF.getContext());
1550 // '\0'
1551 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1552 return CL->getValue() == 0;
1553
1554 // Otherwise, hard case: conservatively return false.
1555 return false;
1556}
1557
1558
1559void
1560AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1561 QualType type = LV.getType();
1562 // FIXME: Ignore result?
1563 // FIXME: Are initializers affected by volatile?
1564 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1565 // Storing "i32 0" to a zero'd memory location is a noop.
1566 return;
1567 } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1568 return EmitNullInitializationToLValue(LV);
1569 } else if (isa<NoInitExpr>(E)) {
1570 // Do nothing.
1571 return;
1572 } else if (type->isReferenceType()) {
1573 RValue RV = CGF.EmitReferenceBindingToExpr(E);
1574 return CGF.EmitStoreThroughLValue(RV, LV);
1575 }
1576
1577 switch (CGF.getEvaluationKind(type)) {
1578 case TEK_Complex:
1579 CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1580 return;
1581 case TEK_Aggregate:
1582 CGF.EmitAggExpr(
1583 E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
1584 AggValueSlot::DoesNotNeedGCBarriers,
1585 AggValueSlot::IsNotAliased,
1586 AggValueSlot::MayOverlap, Dest.isZeroed()));
1587 return;
1588 case TEK_Scalar:
1589 if (LV.isSimple()) {
1590 CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1591 } else {
1592 CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1593 }
1594 return;
1595 }
1596 llvm_unreachable("bad evaluation kind");
1597}
1598
1599void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1600 QualType type = lv.getType();
1601
1602 // If the destination slot is already zeroed out before the aggregate is
1603 // copied into it, we don't have to emit any zeros here.
1604 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1605 return;
1606
1607 if (CGF.hasScalarEvaluationKind(type)) {
1608 // For non-aggregates, we can store the appropriate null constant.
1609 llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1610 // Note that the following is not equivalent to
1611 // EmitStoreThroughBitfieldLValue for ARC types.
1612 if (lv.isBitField()) {
1613 CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1614 } else {
1615 assert(lv.isSimple());
1616 CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1617 }
1618 } else {
1619 // There's a potential optimization opportunity in combining
1620 // memsets; that would be easy for arrays, but relatively
1621 // difficult for structures with the current code.
1622 CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
1623 }
1624}
1625
1626void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1627 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1628 E->getInitializedFieldInUnion(),
1629 E->getArrayFiller());
1630}
1631
1632void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1633 if (E->hadArrayRangeDesignator())
1634 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1635
1636 if (E->isTransparent())
1637 return Visit(E->getInit(0));
1638
1639 VisitCXXParenListOrInitListExpr(
1640 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1641}
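
A transparent InitListExpr is a purely syntactic brace wrapper around a single expression of the same type; a minimal hypothetical example of a list that would typically take the early-return path above:

// Hypothetical example: the braces around 'other' form a transparent list,
// so the emitter simply visits the wrapped expression.
struct Point { int x, y; };

Point copyOf(const Point &other) {
  Point p = {other};
  return p;
}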
1642
1643void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1644 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1645 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1646#if 0
1647 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1648 // (Length of globals? Chunks of zeroed-out space?).
1649 //
1650 // If we can, prefer a copy from a global; this is a lot less code for long
1651 // globals, and it's easier for the current optimizers to analyze.
1652 if (llvm::Constant *C =
1653 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1654 llvm::GlobalVariable* GV =
1655 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1656 llvm::GlobalValue::InternalLinkage, C, "");
1657 EmitFinalDestCopy(ExprToVisit->getType(),
1658 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1659 return;
1660 }
1661#endif
1662
1663 AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
1664
1665 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
1666
1667 // Handle initialization of an array.
1668 if (ExprToVisit->getType()->isConstantArrayType()) {
1669 auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1670 EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
1671 InitExprs, ArrayFiller);
1672 return;
1673 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1674 // A variable array type that has an initializer can only do empty
1675 // initialization. And because this feature is not exposed as an extension
1676 // in C++, we can safely memset the array memory to zero.
1677 assert(InitExprs.size() == 0 &&
1678 "you can only use an empty initializer with VLAs");
1679 CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
1680 return;
1681 }
1682
1683 assert(ExprToVisit->getType()->isRecordType() &&
1684 "Only support structs/unions here!");
1685
1686 // Do struct initialization; this code just sets each individual member
1687 // to the appropriate value. This makes bitfield support automatic;
1688 // the disadvantage is that the generated code is more difficult for
1689 // the optimizer, especially with bitfields.
1690 unsigned NumInitElements = InitExprs.size();
1691 RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
1692
1693 // We'll need to enter cleanup scopes in case any of the element
1694 // initializers throws an exception.
1696 CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
1697
1698 unsigned curInitIndex = 0;
1699
1700 // Emit initialization of base classes.
1701 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1702 assert(NumInitElements >= CXXRD->getNumBases() &&
1703 "missing initializer for base class");
1704 for (auto &Base : CXXRD->bases()) {
1705 assert(!Base.isVirtual() && "should not see vbases here");
1706 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1707 Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1708 Dest.getAddress(), CXXRD, BaseRD,
1709 /*isBaseVirtual*/ false);
1710 AggValueSlot AggSlot = AggValueSlot::forAddr(
1711 V, Qualifiers(),
1712 AggValueSlot::IsDestructed,
1713 AggValueSlot::DoesNotNeedGCBarriers,
1714 AggValueSlot::IsNotAliased,
1715 CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1716 CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
1717
1718 if (QualType::DestructionKind dtorKind =
1719 Base.getType().isDestructedType())
1720 CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
1721 }
1722 }
1723
1724 // Prepare a 'this' for CXXDefaultInitExprs.
1725 CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1726
1727 if (record->isUnion()) {
1728 // Only initialize one field of a union. The field itself is
1729 // specified by the initializer list.
1730 if (!InitializedFieldInUnion) {
1731 // Empty union; we have nothing to do.
1732
1733#ifndef NDEBUG
1734 // Make sure that it's really an empty union and not a failure of
1735 // semantic analysis.
1736 for (const auto *Field : record->fields())
1737 assert(
1738 (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
1739 "Only unnamed bitfields or anonymous class allowed");
1740#endif
1741 return;
1742 }
1743
1744 // FIXME: volatility
1745 FieldDecl *Field = InitializedFieldInUnion;
1746
1747 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1748 if (NumInitElements) {
1749 // Store the initializer into the field
1750 EmitInitializationToLValue(InitExprs[0], FieldLoc);
1751 } else {
1752 // Default-initialize to null.
1753 EmitNullInitializationToLValue(FieldLoc);
1754 }
1755
1756 return;
1757 }
1758
1759 // Here we iterate over the fields; this makes it simpler to both
1760 // default-initialize fields and skip over unnamed fields.
1761 for (const auto *field : record->fields()) {
1762 // We're done once we hit the flexible array member.
1763 if (field->getType()->isIncompleteArrayType())
1764 break;
1765
1766 // Always skip anonymous bitfields.
1767 if (field->isUnnamedBitField())
1768 continue;
1769
1770 // We're done if we reach the end of the explicit initializers, we
1771 // have a zeroed object, and the rest of the fields are
1772 // zero-initializable.
1773 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1774 CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
1775 break;
1776
1777
1778 LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1779 // We never generate write-barriers for initialized fields.
1780 LV.setNonGC(true);
1781
1782 if (curInitIndex < NumInitElements) {
1783 // Store the initializer into the field.
1784 EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
1785 } else {
1786 // We're out of initializers; default-initialize to null
1787 EmitNullInitializationToLValue(LV);
1788 }
1789
1790 // Push a destructor if necessary.
1791 // FIXME: if we have an array of structures, all explicitly
1792 // initialized, we can end up pushing a linear number of cleanups.
1793 bool pushedCleanup = false;
1794 if (QualType::DestructionKind dtorKind
1795 = field->getType().isDestructedType()) {
1796 assert(LV.isSimple());
1797 if (dtorKind) {
1798 CGF.pushDestroyAndDeferDeactivation(
1799 NormalAndEHCleanup, LV.getAddress(CGF), field->getType(),
1800 CGF.getDestroyer(dtorKind), false);
1801 pushedCleanup = true;
1802 }
1803 }
1804
1805 // If the GEP didn't get used because of a dead zero init or something
1806 // else, clean it up for -O0 builds and general tidiness.
1807 if (!pushedCleanup && LV.isSimple())
1808 if (llvm::GetElementPtrInst *GEP =
1809 dyn_cast<llvm::GetElementPtrInst>(LV.emitRawPointer(CGF)))
1810 if (GEP->use_empty())
1811 GEP->eraseFromParent();
1812 }
1813}
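
To make the control flow above concrete, here is a small hypothetical C++20 example: the Derived initializer first fills the Base subobject, then the explicitly initialized field, and the remaining array is null-initialized (or skipped when the slot is already zeroed); the union initializer only touches the selected member.

// Hypothetical example (assumes C++20 for the designated initializer).
struct Base { int id; };

struct Derived : Base {
  int first;
  int rest[8];      // no explicit initializer: zero-filled
};

union Either {
  int   asInt;
  float asFloat;
};

Derived makeDerived() { return Derived{{1}, 2}; }         // base, then fields
Either  makeEither()  { return Either{.asFloat = 1.5f}; } // one union member only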
1814
1815void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1816 llvm::Value *outerBegin) {
1817 // Emit the common subexpression.
1818 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1819
1820 Address destPtr = EnsureSlot(E->getType()).getAddress();
1821 uint64_t numElements = E->getArraySize().getZExtValue();
1822
1823 if (!numElements)
1824 return;
1825
1826 // destPtr is an array*. Construct an elementType* by drilling down a level.
1827 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1828 llvm::Value *indices[] = {zero, zero};
1829 llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
1830 destPtr.emitRawPointer(CGF),
1831 indices, "arrayinit.begin");
1832
1833 // Prepare to special-case multidimensional array initialization: we avoid
1834 // emitting multiple destructor loops in that case.
1835 if (!outerBegin)
1836 outerBegin = begin;
1837 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1838
1839 QualType elementType =
1840 CGF.getContext().getAsArrayType(E->getType())->getElementType();
1841 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1842 CharUnits elementAlign =
1843 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1844 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
1845
1846 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1847 llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1848
1849 // Jump into the body.
1850 CGF.EmitBlock(bodyBB);
1851 llvm::PHINode *index =
1852 Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1853 index->addIncoming(zero, entryBB);
1854 llvm::Value *element =
1855 Builder.CreateInBoundsGEP(llvmElementType, begin, index);
1856
1857 // Prepare for a cleanup.
1858 QualType::DestructionKind dtorKind = elementType.isDestructedType();
1859 EHScopeStack::stable_iterator cleanup;
1860 if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1861 if (outerBegin->getType() != element->getType())
1862 outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1863 CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1864 elementAlign,
1865 CGF.getDestroyer(dtorKind));
1866 cleanup = CGF.EHStack.stable_begin();
1867 } else {
1868 dtorKind = QualType::DK_none;
1869 }
1870
1871 // Emit the actual filler expression.
1872 {
1873 // Temporaries created in an array initialization loop are destroyed
1874 // at the end of each iteration.
1875 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1876 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1877 LValue elementLV = CGF.MakeAddrLValue(
1878 Address(element, llvmElementType, elementAlign), elementType);
1879
1880 if (InnerLoop) {
1881 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1882 auto elementSlot = AggValueSlot::forLValue(
1883 elementLV, CGF, AggValueSlot::IsDestructed,
1884 AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
1885 AggValueSlot::DoesNotOverlap);
1886 AggExprEmitter(CGF, elementSlot, false)
1887 .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1888 } else
1889 EmitInitializationToLValue(E->getSubExpr(), elementLV);
1890 }
1891
1892 // Move on to the next element.
1893 llvm::Value *nextIndex = Builder.CreateNUWAdd(
1894 index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1895 index->addIncoming(nextIndex, Builder.GetInsertBlock());
1896
1897 // Leave the loop if we're done.
1898 llvm::Value *done = Builder.CreateICmpEQ(
1899 nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1900 "arrayinit.done");
1901 llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1902 Builder.CreateCondBr(done, endBB, bodyBB);
1903
1904 CGF.EmitBlock(endBB);
1905
1906 // Leave the partial-array cleanup if we entered one.
1907 if (dtorKind)
1908 CGF.DeactivateCleanupBlock(cleanup, index);
1909}
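
One common source of an ArrayInitLoopExpr is a lambda that captures an array by value: the closure's array member is initialized element-by-element with exactly the kind of index/PHI loop emitted above. A minimal hypothetical example:

// Hypothetical example: copying 'values' into the closure object produces an
// ArrayInitLoopExpr (a shared OpaqueValueExpr for the source array plus a
// per-element initializer indexed by the loop variable).
int captureArray() {
  int values[4] = {1, 2, 3, 4};
  auto closure = [values]() { return values[0]; };
  return closure();
}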
1910
1911void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1912 AggValueSlot Dest = EnsureSlot(E->getType());
1913
1914 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1915 EmitInitializationToLValue(E->getBase(), DestLV);
1916 VisitInitListExpr(E->getUpdater());
1917}
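
A DesignatedInitUpdateExpr arises (in C, and as an extension in other modes) when a later designator overwrites part of an earlier non-constant initializer; as the visitor above shows, the base is emitted into the destination first and the updater list is then applied on top. A hypothetical C99 example:

// Hypothetical C example: get_inner() initializes all of 'o.in' (the base),
// then the designator updates a single subobject of it (the updater).
struct Inner { int x, y; };
struct Outer { struct Inner in; };
struct Inner get_inner(void);

struct Outer make(void) {
  struct Outer o = { .in = get_inner(), .in.y = 7 };
  return o;
}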
1918
1919//===----------------------------------------------------------------------===//
1920// Entry Points into this File
1921//===----------------------------------------------------------------------===//
1922
1923/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1924/// non-zero bytes that will be stored when outputting the initializer for the
1925/// specified initializer expression.
1926static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1927 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1928 E = MTE->getSubExpr();
1929 E = E->IgnoreParenNoopCasts(CGF.getContext());
1930
1931 // 0 and 0.0 won't require any non-zero stores!
1932 if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1933
1934 // If this is an initlist expr, sum up the sizes of the (present)
1935 // elements. If this is something weird, assume the whole thing is non-zero.
1936 const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1937 while (ILE && ILE->isTransparent())
1938 ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1939 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1940 return CGF.getContext().getTypeSizeInChars(E->getType());
1941
1942 // InitListExprs for structs have to be handled carefully. If there are
1943 // reference members, we need to consider the size of the reference, not the
1944 // referencee. InitListExprs for unions and arrays can't have references.
1945 if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1946 if (!RT->isUnionType()) {
1947 RecordDecl *SD = RT->getDecl();
1948 CharUnits NumNonZeroBytes = CharUnits::Zero();
1949
1950 unsigned ILEElement = 0;
1951 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1952 while (ILEElement != CXXRD->getNumBases())
1953 NumNonZeroBytes +=
1954 GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1955 for (const auto *Field : SD->fields()) {
1956 // We're done once we hit the flexible array member or run out of
1957 // InitListExpr elements.
1958 if (Field->getType()->isIncompleteArrayType() ||
1959 ILEElement == ILE->getNumInits())
1960 break;
1961 if (Field->isUnnamedBitField())
1962 continue;
1963
1964 const Expr *E = ILE->getInit(ILEElement++);
1965
1966 // Reference values are always non-null and have the width of a pointer.
1967 if (Field->getType()->isReferenceType())
1968 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1969 CGF.getTarget().getPointerWidth(LangAS::Default));
1970 else
1971 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1972 }
1973
1974 return NumNonZeroBytes;
1975 }
1976 }
1977
1978 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1979 CharUnits NumNonZeroBytes = CharUnits::Zero();
1980 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1981 NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1982 return NumNonZeroBytes;
1983}
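
A worked example of the estimate (sizes assume a typical 64-bit target and are approximate, and the snippet is hypothetical): the reference member always counts as pointer width because references are never null, the explicit 1 contributes sizeof(int), and the zero-filled array contributes nothing, so roughly 12 of the struct's ~80 bytes are treated as non-zero.

// Hypothetical example for the byte-counting heuristic.
extern int global;

struct Entry {
  int &ref;       // counted as pointer width (8 bytes): never null
  int tag;        // initialized to 1: 4 non-zero bytes
  int zeros[16];  // zero-initialized: 0 non-zero bytes
};

Entry e = {global, 1, {}};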
1984
1985/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1986/// zeros in it, emit a memset and avoid storing the individual zeros.
1987///
1988static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1989 CodeGenFunction &CGF) {
1990 // If the slot is already known to be zeroed, nothing to do. Don't mess with
1991 // volatile stores.
1992 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1993 return;
1994
1995 // C++ objects with a user-declared constructor don't need zero'ing.
1996 if (CGF.getLangOpts().CPlusPlus)
1997 if (const RecordType *RT = CGF.getContext()
1998 .getBaseElementType(E->getType())->getAs<RecordType>()) {
1999 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
2000 if (RD->hasUserDeclaredConstructor())
2001 return;
2002 }
2003
2004 // If the type is 16-bytes or smaller, prefer individual stores over memset.
2005 CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
2006 if (Size <= CharUnits::fromQuantity(16))
2007 return;
2008
2009 // Check to see if over 3/4 of the initializer are known to be zero. If so,
2010 // we prefer to emit memset + individual stores for the rest.
2011 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2012 if (NumNonZeroBytes*4 > Size)
2013 return;
2014
2015 // Okay, it seems like a good idea to use an initial memset, emit the call.
2016 llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
2017
2018 Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
2019 CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
2020
2021 // Tell the AggExprEmitter that the slot is known zero.
2022 Slot.setZeroed();
2023}
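
For example (a hypothetical sketch; exact sizes depend on the target), the struct below is roughly 404 bytes with only 4 non-zero bytes in its initializer. It is larger than 16 bytes and far more than 3/4 zero, so the slot is memset to zero first and only the single non-zero store is emitted afterwards.

// Hypothetical example for the memset heuristic.
struct Big {
  int header;
  int payload[100];   // all zero in the initializer below
};

Big makeBig() {
  return Big{42};   // roughly: memset(&result, 0, sizeof(Big)); result.header = 42;
}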
2024
2025
2026
2027
2028/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2029/// type. The result is computed into the given slot. Note that if the slot is
2030/// ignored, the value of the aggregate expression is not needed; a volatile
2031/// destination must have a valid address.
2032void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
2033 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2034 "Invalid aggregate expression to emit");
2035 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2036 "slot has bits but no address");
2037
2038 // Optimize the slot if possible.
2039 CheckAggExprForMemSetUse(Slot, E, *this);
2040
2041 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
2042}
2043
2044LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
2045 assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2046 Address Temp = CreateMemTemp(E->getType());
2047 LValue LV = MakeAddrLValue(Temp, E->getType());
2048 EmitAggExpr(E, AggValueSlot::forLValue(LV, *this, AggValueSlot::IsNotDestructed,
2049 AggValueSlot::DoesNotNeedGCBarriers,
2050 AggValueSlot::IsNotAliased,
2051 AggValueSlot::DoesNotOverlap));
2052 return LV;
2053}
2054
2055AggValueSlot::Overlap_t
2056CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
2057 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2058 return AggValueSlot::DoesNotOverlap;
2059
2060 // If the field lies entirely within the enclosing class's nvsize, its tail
2061 // padding cannot overlap any already-initialized object. (The only subobjects
2062 // with greater addresses that might already be initialized are vbases.)
2063 const RecordDecl *ClassRD = FD->getParent();
2064 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
2065 if (Layout.getFieldOffset(FD->getFieldIndex()) +
2066 getContext().getTypeSize(FD->getType()) <=
2067 (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
2068 return AggValueSlot::DoesNotOverlap;
2069
2070 // The tail padding may contain values we need to preserve.
2071 return AggValueSlot::MayOverlap;
2072}
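
A hypothetical illustration of the field case: a member without [[no_unique_address]] always gets DoesNotOverlap, while an attributed member of class type is subject to the nvsize check above, because a later member may be laid out in its tail padding.

// Hypothetical example (layout details depend on the ABI).
struct Padded {
  int  i;
  char c;   // tail padding typically follows
};

struct Holder {
  Padded plain;                          // no attribute: DoesNotOverlap
  [[no_unique_address]] Padded packed;   // attribute: checked against nvsize
  char reused;                           // may be placed in packed's tail padding
};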
2073
2074AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
2075 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2076 // If the most-derived object is a field declared with [[no_unique_address]],
2077 // the tail padding of any virtual base could be reused for other subobjects
2078 // of that field's class.
2079 if (IsVirtual)
2080 return AggValueSlot::MayOverlap;
2081
2082 // If the base class is laid out entirely within the nvsize of the derived
2083 // class, its tail padding cannot yet be initialized, so we can issue
2084 // stores at the full width of the base class.
2085 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2086 if (Layout.getBaseClassOffset(BaseRD) +
2087 getContext().getASTRecordLayout(BaseRD).getSize() <=
2088 Layout.getNonVirtualSize())
2089 return AggValueSlot::DoesNotOverlap;
2090
2091 // The tail padding may contain values we need to preserve.
2092 return AggValueSlot::MayOverlap;
2093}
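
A hypothetical illustration of the base-class case: the non-virtual Base subobject of Derived lies entirely within Derived's non-virtual size, so its initialization may store at Base's full width, whereas the virtual base is always treated as potentially overlapping.

// Hypothetical example (layout details depend on the ABI).
struct Base  { int a; char b; };      // non-virtual base: DoesNotOverlap here
struct VBase { double d; char e; };   // virtual base: always MayOverlap

struct Derived : Base, virtual VBase {
  char extra;
};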
2094
2095void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
2096 AggValueSlot::Overlap_t MayOverlap,
2097 bool isVolatile) {
2098 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2099
2100 Address DestPtr = Dest.getAddress(*this);
2101 Address SrcPtr = Src.getAddress(*this);
2102
2103 if (getLangOpts().CPlusPlus) {
2104 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2105 CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
2106 assert((Record->hasTrivialCopyConstructor() ||
2107 Record->hasTrivialCopyAssignment() ||
2108 Record->hasTrivialMoveConstructor() ||
2109 Record->hasTrivialMoveAssignment() ||
2110 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2111 "Trying to aggregate-copy a type without a trivial copy/move "
2112 "constructor or assignment operator");
2113 // Ignore empty classes in C++.
2114 if (Record->isEmpty())
2115 return;
2116 }
2117 }
2118
2119 if (getLangOpts().CUDAIsDevice) {
2120 if (Ty->isCUDADeviceBuiltinSurfaceType()) {
2121 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
2122 Src))
2123 return;
2124 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2125 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
2126 Src))
2127 return;
2128 }
2129 }
2130
2131 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2132 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2133 // read from another object that overlaps in any way the storage of the first
2134 // object, then the overlap shall be exact and the two objects shall have
2135 // qualified or unqualified versions of a compatible type."
2136 //
2137 // memcpy is not defined if the source and destination pointers are exactly
2138 // equal, but other compilers do this optimization, and almost every memcpy
2139 // implementation handles this case safely. If there is a libc that does not
2140 // safely handle this, we can add a target hook.
2141
2142 // Get data size info for this aggregate. Don't copy the tail padding if this
2143 // might be a potentially-overlapping subobject, since the tail padding might
2144 // be occupied by a different object. Otherwise, copying it is fine.
2145 TypeInfoChars TypeInfo;
2146 if (MayOverlap)
2147 TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
2148 else
2149 TypeInfo = getContext().getTypeInfoInChars(Ty);
2150
2151 llvm::Value *SizeVal = nullptr;
2152 if (TypeInfo.Width.isZero()) {
2153 // But note that getTypeInfo returns 0 for a VLA.
2154 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2155 getContext().getAsArrayType(Ty))) {
2156 QualType BaseEltTy;
2157 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2158 TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
2159 assert(!TypeInfo.Width.isZero());
2160 SizeVal = Builder.CreateNUWMul(
2161 SizeVal,
2162 llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
2163 }
2164 }
2165 if (!SizeVal) {
2166 SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
2167 }
2168
2169 // FIXME: If we have a volatile struct, the optimizer can remove what might
2170 // appear to be `extra' memory ops:
2171 //
2172 // volatile struct { int i; } a, b;
2173 //
2174 // int main() {
2175 // a = b;
2176 // a = b;
2177 // }
2178 //
2179 // we need to use a different call here. We use isVolatile to indicate when
2180 // either the source or the destination is volatile.
2181
2182 DestPtr = DestPtr.withElementType(Int8Ty);
2183 SrcPtr = SrcPtr.withElementType(Int8Ty);
2184
2185 // Don't do any of the memmove_collectable tests if GC isn't set.
2186 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2187 // fall through
2188 } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
2189 RecordDecl *Record = RecordTy->getDecl();
2190 if (Record->hasObjectMember()) {
2191 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2192 SizeVal);
2193 return;
2194 }
2195 } else if (Ty->isArrayType()) {
2196 QualType BaseType = getContext().getBaseElementType(Ty);
2197 if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
2198 if (RecordTy->getDecl()->hasObjectMember()) {
2199 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2200 SizeVal);
2201 return;
2202 }
2203 }
2204 }
2205
2206 auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2207
2208 // Determine the metadata to describe the position of any padding in this
2209 // memcpy, as well as the TBAA tags for the members of the struct, in case
2210 // the optimizer wishes to expand it in to scalar memory operations.
2211 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2212 Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2213
2214 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2215 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2216 Dest.getTBAAInfo(), Src.getTBAAInfo());
2217 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2218 }
2219}
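
As a closing illustration (hypothetical, and assuming a trivially copyable type), an aggregate assignment like the one below is emitted through this path as a single llvm.memcpy of the object, annotated with tbaa.struct metadata describing its members, rather than as per-field loads and stores.

// Hypothetical example of an aggregate copy that lowers to llvm.memcpy.
struct Blob {
  int    id;
  double samples[8];
};

void copyBlob(Blob &dst, const Blob &src) {
  dst = src;   // trivial copy assignment: one memcpy of sizeof(Blob) bytes
}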
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3285
CompareKind
Definition: CGExprAgg.cpp:991
@ CK_Greater
Definition: CGExprAgg.cpp:993
@ CK_Less
Definition: CGExprAgg.cpp:992
@ CK_Equal
Definition: CGExprAgg.cpp:994
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF)
GetNumNonZeroBytesInInit - Get an approximate count of the number of non-zero bytes that will be stor...
Definition: CGExprAgg.cpp:1926
static Expr * findPeephole(Expr *op, CastKind kind, const ASTContext &ctx)
Attempt to look through various unimportant expressions to find a cast of the given kind.
Definition: CGExprAgg.cpp:730
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
Definition: CGExprAgg.cpp:1142
static bool isTrivialFiller(Expr *E)
Determine if E is a trivial array filler, that is, one that is equivalent to zero-initialization.
Definition: CGExprAgg.cpp:486
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF)
isSimpleZero - If emitting this value will obviously just cause a store of zero to memory,...
Definition: CGExprAgg.cpp:1527
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
Definition: CGExprAgg.cpp:997
static bool castPreservesZero(const CastExpr *CE)
Determine whether the given cast kind is known to always convert values with all zero bits in their v...
Definition: CGExprAgg.cpp:1430
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CodeGenFunction &CGF)
CheckAggExprForMemSetUse - If the initializer is large and has a lot of zeros in it,...
Definition: CGExprAgg.cpp:1988
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the C++ template declaration subclasses.
llvm::MachO::Record Record
Definition: MachO.h:31
SourceLocation Loc
Definition: SemaObjC.cpp:755
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2768
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
bool hasSameType(QualType T1, QualType T2) const
Determine whether the given types T1 and T2 are equivalent.
Definition: ASTContext.h:2591
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
ComparisonCategories CompCategories
Types and expressions required to build C++2a three-way comparisons using operator<=>,...
Definition: ASTContext.h:2272
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
QualType removeAddrSpaceQualType(QualType T) const
Remove any existing address space on the type and returns the type with qualifiers intact (or that's ...
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2618
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
unsigned getTargetAddressSpace(LangAS AS) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
Definition: RecordLayout.h:210
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4141
Expr * getCond() const
getCond - Return the expression representing the condition for the ?: operator.
Definition: Expr.h:4319
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition: Expr.h:4325
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition: Expr.h:4331
Represents a loop initializing the elements of an array.
Definition: Expr.h:5511
llvm::APInt getArraySize() const
Definition: Expr.h:5533
OpaqueValueExpr * getCommonExpr() const
Get the common subexpression shared by all initializations (the source array).
Definition: Expr.h:5526
Expr * getSubExpr() const
Get the initializer to use for each array element.
Definition: Expr.h:5531
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2664
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3518
QualType getElementType() const
Definition: Type.h:3530
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6437
QualType getValueType() const
Gets the type contained by this atomic type, i.e.
Definition: Type.h:7189
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3840
Expr * getLHS() const
Definition: Expr.h:3889
Expr * getRHS() const
Definition: Expr.h:3891
Opcode getOpcode() const
Definition: Expr.h:3884
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1487
CXXTemporary * getTemporary()
Definition: ExprCXX.h:1505
const Expr * getSubExpr() const
Definition: ExprCXX.h:1509
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1542
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1264
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1371
Expr * getExpr()
Get the initialization expression that will be used.
Definition: ExprCXX.cpp:1035
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition: ExprCXX.h:1733
bool constructsVBase() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition: ExprCXX.h:1774
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will call.
Definition: ExprCXX.h:1770
bool inheritedFromVBase() const
Determine whether the inherited constructor is inherited from a virtual base of the object we constru...
Definition: ExprCXX.h:1784
Represents a list-initialization with parenthesis.
Definition: ExprCXX.h:4944
ArrayRef< Expr * > getInitExprs()
Definition: ExprCXX.h:4984
FieldDecl * getInitializedFieldInUnion()
Definition: ExprCXX.h:5024
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition: DeclCXX.cpp:576
bool hasUserDeclaredConstructor() const
Determine whether this class has any user-declared constructors.
Definition: DeclCXX.h:791
A rewritten comparison expression that was originally written using operator syntax.
Definition: ExprCXX.h:283
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition: ExprCXX.h:301
An expression "T()" which creates a value-initialized rvalue of type T, which is a non-class type.
Definition: ExprCXX.h:2177
Implicit construction of a std::initializer_list<T> object from an array temporary within list-initia...
Definition: ExprCXX.h:797
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1202
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2820
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition: Expr.cpp:1590
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3483
CastKind getCastKind() const
Definition: Expr.h:3527
Expr * getSubExpr()
Definition: Expr.h:3533
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition: Expr.h:4558
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition: Expr.h:4594
Represents a 'co_await' expression.
Definition: ExprCXX.h:5175
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
CharUnits getAlignment() const
Definition: Address.h:166
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
bool isValid() const
Definition: Address.h:154
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:176
An aggregate value slot.
Definition: CGValue.h:512
void setVolatile(bool flag)
Definition: CGValue.h:631
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:580
Address getAddress() const
Definition: CGValue.h:652
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const
Get the preferred size to use when storing a value to this slot.
Definition: CGValue.h:690
static AggValueSlot forLValue(const LValue &LV, CodeGenFunction &CGF, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:610
NeedsGCBarriers_t requiresGCollection() const
Definition: CGValue.h:642
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:621
void setZeroed(bool V=true)
Definition: CGValue.h:682
IsZeroed_t isZeroed() const
Definition: CGValue.h:683
Qualifiers getQualifiers() const
Definition: CGValue.h:625
IsAliased_t isPotentiallyAliased() const
Definition: CGValue.h:662
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
IsDestructed_t isExternallyDestructed() const
Definition: CGValue.h:618
Overlap_t mayOverlap() const
Definition: CGValue.h:666
RValue asRValue() const
Definition: CGValue.h:674
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:648
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:824
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:397
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:364
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:87
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *Size)=0
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
static bool hasScalarEvaluationKind(QualType T)
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual)
Determine whether a base class initialization may overlap some other object.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
void callCStructCopyAssignmentOperator(LValue Dst, LValue Src)
void callCStructMoveConstructor(LValue Dst, LValue Src)
llvm::SmallVector< DeferredDeactivateCleanup > DeferredDeactivationCleanupStack
void callCStructCopyConstructor(LValue Dst, LValue Src)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E)
Emit a call to a constructor inherited from a base class, passing the current constructor's arguments...
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
const TargetInfo & getTarget() const
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void pushFullExprCleanup(CleanupKind kind, As... A)
pushFullExprCleanup - Push a cleanup to be run at the end of the current full-expression.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD)
Determine whether a field initialization may overlap some other object.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile)
Build all the stores needed to initialize an aggregate at Dest with the value Val.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual)
GetAddressOfBaseOfCompleteClass - Convert the given pointer to a complete class to the given direct b...
void callCStructMoveAssignmentOperator(LValue Dst, LValue Src)
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
llvm::Type * ConvertType(QualType T)
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
CodeGenTypes & getTypes() const
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot())
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
RValue EmitAtomicExpr(AtomicExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1241
llvm::MDNode * getTBAAStructInfo(QualType QTy)
llvm::Module & getModule() const
bool isPaddedAtomicType(QualType type)
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
TBAAAccessInfo mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo, TBAAAccessInfo SrcInfo)
mergeTBAAInfoForMemoryTransfer - Get merged TBAA information for the purposes of memory transfer call...
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
bool isPointerZeroInitializable(QualType T)
Check if the pointer type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into a unstable pointer to the EH stack.
Definition: CGCleanup.h:639
LValue - This represents an lvalue references.
Definition: CGValue.h:181
bool isBitField() const
Definition: CGValue.h:283
bool isSimple() const
Definition: CGValue.h:281
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:370
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:365
QualType getType() const
Definition: CGValue.h:294
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:338
void setNonGC(bool Value)
Definition: CGValue.h:307
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:41
llvm::Value * getAggregatePointer(QualType PointeeType, CodeGenFunction &CGF) const
Definition: CGValue.h:87
bool isScalar() const
Definition: CGValue.h:63
static RValue get(llvm::Value *V)
Definition: CGValue.h:97
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:124
bool isAggregate() const
Definition: CGValue.h:65
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:82
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:70
bool isComplex() const
Definition: CGValue.h:64
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:77
An abstract representation of an aligned address.
Definition: Address.h:41
llvm::Value * getPointer() const
Definition: Address.h:65
static RawAddress invalid()
Definition: Address.h:60
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:356
const ComparisonCategoryInfo & getInfoForType(QualType Ty) const
Return the comparison category information as specified by getCategoryForType(Ty).
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
Complex values, per C99 6.2.5p11.
Definition: Type.h:3086
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3413
const Expr * getInitializer() const
Definition: Expr.h:3436
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:3556
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1072
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5256
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2342
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
bool hasAttr() const
Definition: DeclBase.h:583
Expr * getBase() const
Definition: Expr.h:5476
InitListExpr * getUpdater() const
Definition: Expr.h:5479
Represents an expression – generally a full-expression – that introduces cleanups to be run at the en...
Definition: ExprCXX.h:3467
This represents one expression.
Definition: Expr.h:110
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3086
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3055
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3556
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3057
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.cpp:4644
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3270
const Expr * getSubExpr() const
Definition: Expr.h:1052
Represents a C11 generic selection.
Definition: Expr.h:5725
Represents an implicitly-generated value initialization of an object of a given type.
Definition: Expr.h:5600
Describes an C or C++ initializer list.
Definition: Expr.h:4847
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition: Expr.cpp:2432
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition: Expr.h:4966
unsigned getNumInits() const
Definition: Expr.h:4877
bool hadArrayRangeDesignator() const
Definition: Expr.h:5024
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition: Expr.h:4941
const Expr * getInit(unsigned Init) const
Definition: Expr.h:4893
ArrayRef< Expr * > inits()
Definition: Expr.h:4887
A C++ lambda expression, which produces a function object (of unspecified type) that can be invoked l...
Definition: ExprCXX.h:1950
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument for this lambda expression.
Definition: ExprCXX.h:2088
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: ExprCXX.h:2062
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument for this lambda expression (which initializes the first ca...
Definition: ExprCXX.h:2076
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition: ExprCXX.cpp:1332
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4710
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4727
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3172
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3460
Represents a place-holder for an object not to be initialized by anything.
Definition: Expr.h:5420
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:945
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1168
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1218
bool isUnique() const
Definition: Expr.h:1226
Expr * getSelectedExpr() const
Definition: ExprCXX.h:4442
ParenExpr - This represents a parethesized expression, e.g.
Definition: Expr.h:2130
const Expr * getSubExpr() const
Definition: Expr.h:2145
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1986
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6305
A (possibly-)qualified type.
Definition: Type.h:940
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:7443
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2729
@ DK_nontrivial_c_struct
Definition: Type.h:1523
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7485
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1530
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition: Type.cpp:2574
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition: Type.h:1502
The collection of all-type qualifiers we support.
Definition: Type.h:318
Represents a struct/union/class.
Definition: Decl.h:4168
bool hasObjectMember() const
Definition: Decl.h:4228
field_range fields() const
Definition: Decl.h:4374
field_iterator field_begin() const
Definition: Decl.cpp:5069
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5549
RecordDecl * getDecl() const
Definition: Type.h:5559
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4383
CompoundStmt * getSubStmt()
Definition: Expr.h:4400
RetTy Visit(PTR(Stmt) S, ParamTys... P)
Definition: StmtVisitor.h:44
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:185
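StmtVisitor's Visit() dispatches on the dynamic class of the statement, calling the most specific Visit<NodeKind> overload that the CRTP subclass defines and falling back to VisitStmt otherwise. A hedged sketch of the pattern; TinyVisitor is a hypothetical example, not one of the emitters in this file:
#include "clang/AST/Expr.h"
#include "clang/AST/StmtVisitor.h"
namespace {
// Illustrative only: a visitor that unwraps parentheses and ignores the rest.
class TinyVisitor : public clang::StmtVisitor<TinyVisitor> {
public:
  void VisitParenExpr(clang::ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitStmt(clang::Stmt *) { /* default case for unhandled nodes */ }
};
} // namespace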
Stmt - This represents one statement.
Definition: Stmt.h:84
StringLiteral - This represents a string literal expression, e.g. "foo" or L"bar" (wide strings).
Definition: Expr.h:1773
Represents a reference to a non-type template parameter that has been substituted with a template argument.
Definition: ExprCXX.h:4466
bool isUnion() const
Definition: Decl.h:3790
uint64_t getPointerWidth(LangAS AddrSpace) const
Return the width of pointers on this target, for the specified address space.
Definition: TargetInfo.h:472
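getPointerWidth() is queried through the target when an emitter needs to know how wide a pointer-typed member is. A minimal sketch for the default address space; defaultPointerWidth is a hypothetical helper:
#include <cstdint>
#include "clang/AST/ASTContext.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/TargetInfo.h"
// Illustrative only: pointer width, in bits, for the default address space.
static uint64_t defaultPointerWidth(const clang::ASTContext &Ctx) {
  return Ctx.getTargetInfo().getPointerWidth(clang::LangAS::Default);
}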
bool isConstantArrayType() const
Definition: Type.h:7682
bool isArrayType() const
Definition: Type.h:7678
bool isPointerType() const
Definition: Type.h:7612
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8193
bool isReferenceType() const
Definition: Type.h:7624
bool isVariableArrayType() const
Definition: Type.h:7690
bool isCUDADeviceBuiltinSurfaceType() const
Check if the type is the CUDA device builtin surface type.
Definition: Type.cpp:4922
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition: Type.h:8020
bool isAnyComplexType() const
Definition: Type.h:7714
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g., it is a signed integer type or a vector.
Definition: Type.cpp:2175
bool isMemberPointerType() const
Definition: Type.h:7660
bool isAtomicType() const
Definition: Type.h:7757
bool isCUDADeviceBuiltinTextureType() const
Check if the type is the CUDA device builtin texture type.
Definition: Type.cpp:4929
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector.
Definition: Type.cpp:2247
bool isRealFloatingType() const
Floating point categories.
Definition: Type.cpp:2255
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:8126
bool isNullPtrType() const
Definition: Type.h:7938
bool isRecordType() const
Definition: Type.h:7706
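The classification predicates above are usually paired with getAs<>/castAs<>: getAs<> may return null, while castAs<> asserts, so it is only reached once the predicate has already succeeded. A hedged sketch of that classify-then-cast pattern; recordDeclForType is a hypothetical helper:
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"
// Illustrative only: fetch the RecordDecl behind a record type, or null.
static const clang::RecordDecl *recordDeclForType(clang::QualType T) {
  if (!T->isRecordType())
    return nullptr;
  const auto *RT = T->castAs<clang::RecordType>(); // safe after isRecordType()
  return RT->getDecl();
}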
UnaryOperator - This represents the unary-expression's (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:2183
Expr * getSubExpr() const
Definition: Expr.h:2228
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4667
QualType getType() const
Definition: Decl.h:717
Represents a variable declaration or definition.
Definition: Decl.h:918
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw statement leaving a full-expression or a try-block during unwinding).
Definition: EHScopeStack.h:80
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< AtomicType > atomicType
Matches atomic types.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Definition: Format.cpp:3799
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1873
bool GE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:897
The JSON file list parser is used to communicate input to InstallAPI.
@ CPlusPlus
Definition: LangStandard.h:55
LangAS
Defines the address space values used by the address space qualifier of QualType.
Definition: AddressSpaces.h:25
CastKind
CastKind - The kind of operation required for a conversion.
ExprValueKind
The categorization of expression values, currently following the C++11 scheme.
Definition: Specifiers.h:129
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition: Address.h:291
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
uint64_t Width
Definition: ASTContext.h:153