//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
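    //
    // Editor's illustration (hypothetical snippet, not from this file):
    //   return;  x = 1;      // unreachable, no label: nothing is emitted
    //   return;  L: x = 1;   // unreachable, but 'L' may be a goto target,
    //                        // so a fresh block is still created below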
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stop point if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
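    //
    // Editor's illustration: after "exit(0);" the call emitter opens a fresh
    // block with no predecessors; 'outgoing' is that empty block, and erasing
    // it below clears the insertion point so any code following the call is
    // treated as unreachable.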
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));               break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S), Attrs);  break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S), Attrs);        break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S), Attrs);      break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));       break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));       break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));             break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    llvm_unreachable("scope not supported with FE outlining");
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
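/// Editor's illustration (hypothetical, not from this file): in the GNU
/// statement expression
///   int x = ({ f(); g(); });
/// GetLast is true and the value of the trailing expression g() becomes the
/// value of the whole construct.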
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when
      // put at the end of a statement expression, they yield the value of
      // their subexpression.  Handle this by walking through all labels we
      // encounter, emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
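      //
      // Editor's illustration (hypothetical): in "({ goto out; out: 42; })"
      // the StmtExprResult is the LabelStmt "out: 42"; the loop below peels
      // off (and emits) the label until the expression 42 is reached.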
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the
        // result here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

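// Editor's note (illustration): the loop emitters below use this to fold away
// blocks whose conditional branch was elided, e.g. the "do.cond" block of
// "do { ... } while (0)", which by then holds only an unconditional branch.
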
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
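
// Editor's illustration (hypothetical statements, not from this file):
//   [[clang::musttail]] return fib(n - 1);  // reaches the attr::MustTail case
//   [[assume(n > 0)]];                      // emits llvm.assume via CXXAssume
// Both funnel through EmitAttributedStmt before the sub-statement is emitted.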

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch
  // that can be evaluated at runtime.
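  //
  // Editor's illustration: for "if consteval { A } else { B }" only B is
  // emitted here (A was evaluated at compile time); with the negated form
  // "if !consteval { A } else { B }", only A is emitted.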
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior
  // to executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the
  // MC/DC updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we also consider conditions that can be constant-folded
  // to be constant.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of
  // the following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}

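// Editor's illustration (hedged reading of the rule above): in C++26,
// "while (true) ;" is a trivial infinite loop, so MustProgress is removed
// from the function and the loop is kept; "while (count());" has a
// non-constant condition and may still be assumed to make progress.
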
// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching
// one of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(
        LoopHeader.getBlock(), ConvergenceTokenStack.back()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
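  //
  // Editor's illustration: in "while (bool more = next()) use(more);" the
  // ConditionScope below destroys and re-creates 'more' on every trip
  // through while.cond, matching the wording quoted above.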
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopBody, ConvergenceTokenStack.back()));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the do.cond block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
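  //
  // Editor's illustration (hypothetical macro): with
  //   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
  // the condition folds to false, so no backedge branch is emitted and the
  // forwarding do.cond block is erased further down.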
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition
  // as the continue block.  Otherwise, if there is no condition variable, we
  // can form the continue block now.  If there is a condition variable, we
  // can't form the continue block until after we've emitted the condition,
  // because the condition is in scope in the increment, but Sema's jump
  // diagnostics ensure that there are no continues from the condition
  // variable that jump to the loop increment.
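  //
  // Editor's illustration: in "for (int i = 0; bool ok = check(i); ++i)" a
  // 'continue' must branch to for.inc with 'ok' still in scope, so when a
  // condition variable exists the continue destination is created only after
  // the condition has been emitted (see below).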
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for
    // the body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}

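// Editor's illustration (hypothetical declaration, not from this file): given
//   __attribute__((swiftasynccall)) void step(void *ctx);
// a "return step(ctx);" inside another swiftasynccall function satisfies this
// predicate and is marked as a musttail call in EmitReturnStmt below.
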
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function
/// returns non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our
  // over-conservative rules about not jumping to statements following block
  // literals with non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the
    // expression rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big, then add
/// multiple cases to the switch instruction, one for each value within the
/// range. If the range is too big, emit an "if" condition check instead.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three
    // cases will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
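  //
  // Editor's illustration: for "case 10 ... 200:" this emits roughly
  //   %diff     = sub i32 %cond, 10
  //   %inbounds = icmp ule i32 %diff, 190
  // so a single unsigned comparison covers the entire range.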

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

1685 ArrayRef<const Attr *> Attrs) {
1686 // If there is no enclosing switch instance that we're aware of, then this
1687 // case statement and its block can be elided. This situation only happens
1688 // when we've constant-folded the switch, are emitting the constant case,
1689 // and part of the constant case includes another case statement. For
1690 // instance: switch (4) { case 4: do { case 5: } while (1); }
1691 if (!SwitchInsn) {
1692 EmitStmt(S.getSubStmt());
1693 return;
1694 }
1695
1696 // Handle case ranges.
1697 if (S.getRHS()) {
1698 EmitCaseStmtRange(S, Attrs);
1699 return;
1700 }
1701
1702 llvm::ConstantInt *CaseVal =
1703 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1704
1705 // Emit debuginfo for the case value if it is an enum value.
1706 const ConstantExpr *CE;
1707 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1708 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1709 else
1710 CE = dyn_cast<ConstantExpr>(S.getLHS());
1711 if (CE) {
1712 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1713 if (CGDebugInfo *Dbg = getDebugInfo())
1714 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1715 Dbg->EmitGlobalVariable(DE->getDecl(),
1716 APValue(llvm::APSInt(CaseVal->getValue())));
1717 }
1718
1719 if (SwitchLikelihood)
1720 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1721
1722 // If the body of the case is just a 'break', try to not emit an empty block.
1723 // If we're profiling or we're not optimizing, leave the block in for better
1724 // debug and coverage analysis.
1725 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1726 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1727 isa<BreakStmt>(S.getSubStmt())) {
1728 JumpDest Block = BreakContinueStack.back().BreakBlock;
1729
1730 // Only do this optimization if there are no cleanups that need emitting.
1731 if (isObviouslyBranchWithoutCleanups(Block)) {
1732 if (SwitchWeights)
1733 SwitchWeights->push_back(getProfileCount(&S));
1734 SwitchInsn->addCase(CaseVal, Block.getBlock());
1735
1736 // If there was a fallthrough into this case, make sure to redirect it to
1737 // the end of the switch as well.
1738 if (Builder.GetInsertBlock()) {
1739 Builder.CreateBr(Block.getBlock());
1740 Builder.ClearInsertionPoint();
1741 }
1742 return;
1743 }
1744 }
1745
1746 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1747 EmitBlockWithFallThrough(CaseDest, &S);
1748 if (SwitchWeights)
1749 SwitchWeights->push_back(getProfileCount(&S));
1750 SwitchInsn->addCase(CaseVal, CaseDest);
1751
1752 // Recursively emitting the statement is acceptable, but is not wonderful for
1753 // code where we have many case statements nested together, e.g.:
1754 // case 1:
1755 // case 2:
1756 // case 3: etc.
1757 // Handling this recursively will create a new block for each case statement
1758 // that falls through to the next case which is IR intensive. It also causes
1759 // deep recursion which can run into stack depth limitations. Handle
1760 // sequential non-range case statements specially.
1761 //
1762 // TODO When the next case has a likelihood attribute the code returns to the
1763 // recursive algorithm. Maybe improve this case if it becomes common practice
1764 // to use a lot of attributes.
1765 const CaseStmt *CurCase = &S;
1766 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1767
1768 // Otherwise, iteratively add consecutive cases to this switch stmt.
1769 while (NextCase && NextCase->getRHS() == nullptr) {
1770 CurCase = NextCase;
1771 llvm::ConstantInt *CaseVal =
1772 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1773
1774 if (SwitchWeights)
1775 SwitchWeights->push_back(getProfileCount(NextCase));
1776 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1777 CaseDest = createBasicBlock("sw.bb");
1778 EmitBlockWithFallThrough(CaseDest, CurCase);
1779 }
1780 // Since this loop is only executed when the CaseStmt has no attributes,
1781 // use a hard-coded value.
1782 if (SwitchLikelihood)
1783 SwitchLikelihood->push_back(Stmt::LH_None);
1784
1785 SwitchInsn->addCase(CaseVal, CaseDest);
1786 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1787 }
1788
1789 // Generate a stop point for debug info if the case statement is
1790 // followed by a default statement. A fallthrough case before a
1791 // default case gets its own branch target.
1792 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1793 EmitStopPoint(CurCase);
1794
1795 // Normal default recursion for non-cases.
1796 EmitStmt(CurCase->getSubStmt());
1797}
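// Illustration (not part of this file): consecutive value cases such as
//
//   bool isLowDigit(char c) {
//     switch (c) {
//     case '0': case '1': case '2':
//       return true;
//     default:
//       return false;
//     }
//   }
//
// are folded by the iterative loop above; without profile instrumentation
// all three values share one "sw.bb" destination instead of a chain of
// fallthrough blocks, avoiding deep recursion in EmitStmt.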
1798
1799void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1800 ArrayRef<const Attr *> Attrs) {
1801 // If there is no enclosing switch instance that we're aware of, then this
1802 // default statement can be elided. This situation only happens when we've
1803 // constant-folded the switch.
1804 if (!SwitchInsn) {
1805 EmitStmt(S.getSubStmt());
1806 return;
1807 }
1808
1809 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1810 assert(DefaultBlock->empty() &&
1811 "EmitDefaultStmt: Default block already defined?");
1812
1813 if (SwitchLikelihood)
1814 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1815
1816 EmitBlockWithFallThrough(DefaultBlock, &S);
1817
1818 EmitStmt(S.getSubStmt());
1819}
1820
1821/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1822/// constant value that is being switched on, see if we can dead code eliminate
1823/// the body of the switch to a simple series of statements to emit. Basically,
1824/// on a switch (5) we want to find these statements:
1825/// case 5:
1826/// printf(...); <--
1827/// ++i; <--
1828/// break;
1829///
1830/// and add them to the ResultStmts vector. If it is unsafe to do this
1831/// transformation (for example, one of the elided statements contains a label
1832/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1833/// should include statements after it (e.g. the printf() line is a substmt of
1834/// the case) then return CSFC_FallThrough. If we handled it and found a break
1835/// statement, then return CSFC_Success.
1836///
1837/// If Case is non-null, then we are looking for the specified case, checking
1838/// that nothing we jump over contains labels. If Case is null, then we found
1839/// the case and are looking for the break.
1840///
1841/// If the recursive walk actually finds our Case, then we set FoundCase to
1842/// true.
1843///
1844enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1845static CSFC_Result CollectStatementsForCase(const Stmt *S,
1846 const SwitchCase *Case,
1847 bool &FoundCase,
1848 SmallVectorImpl<const Stmt*> &ResultStmts) {
1849 // If this is a null statement, just succeed.
1850 if (!S)
1851 return Case ? CSFC_Success : CSFC_FallThrough;
1852
1853 // If this is the switch case (case 4: or default) that we're looking for, then
1854 // we're in business. Just add the substatement.
1855 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1856 if (S == Case) {
1857 FoundCase = true;
1858 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1859 ResultStmts);
1860 }
1861
1862 // Otherwise, this is some other case or default statement, just ignore it.
1863 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1864 ResultStmts);
1865 }
1866
1867 // If we are in the live part of the code and we found our break statement,
1868 // return a success!
1869 if (!Case && isa<BreakStmt>(S))
1870 return CSFC_Success;
1871
1872 // If this is a compound statement, then it might contain the SwitchCase,
1873 // the break, or neither.
1874 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1875 // Handle this as two cases: we might be looking for the SwitchCase (if so
1876 // the skipped statements must be skippable) or we might already have it.
1877 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1878 bool StartedInLiveCode = FoundCase;
1879 unsigned StartSize = ResultStmts.size();
1880
1881 // If we've not found the case yet, scan through looking for it.
1882 if (Case) {
1883 // Keep track of whether we see a skipped declaration. The code could be
1884 // using the declaration even if it is skipped, so we can't optimize out
1885 // the decl if the kept statements might refer to it.
1886 bool HadSkippedDecl = false;
1887
1888 // If we're looking for the case, just see if we can skip each of the
1889 // substatements.
1890 for (; Case && I != E; ++I) {
1891 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1892
1893 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1894 case CSFC_Failure: return CSFC_Failure;
1895 case CSFC_Success:
1896 // A successful result means either that 1) the statement doesn't
1897 // have the case and is skippable, or 2) it does contain the case value
1898 // and also contains the break to exit the switch. In the latter case,
1899 // we just verify the rest of the statements are elidable.
1900 if (FoundCase) {
1901 // If we found the case and skipped declarations, we can't do the
1902 // optimization.
1903 if (HadSkippedDecl)
1904 return CSFC_Failure;
1905
1906 for (++I; I != E; ++I)
1907 if (CodeGenFunction::ContainsLabel(*I, true))
1908 return CSFC_Failure;
1909 return CSFC_Success;
1910 }
1911 break;
1912 case CSFC_FallThrough:
1913 // If we have a fallthrough result, then we found the case and have
1914 // started to include statements. Consider the rest of the
1915 // statements in the compound statement as candidates for inclusion.
1916 assert(FoundCase && "Didn't find case but returned fallthrough?");
1917 // We recursively found Case, so we're not looking for it anymore.
1918 Case = nullptr;
1919
1920 // If we found the case and skipped declarations, we can't do the
1921 // optimization.
1922 if (HadSkippedDecl)
1923 return CSFC_Failure;
1924 break;
1925 }
1926 }
1927
1928 if (!FoundCase)
1929 return CSFC_Success;
1930
1931 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1932 }
1933
1934 // If we have statements in our range, then we know that the statements are
1935 // live and need to be added to the set of statements we're tracking.
1936 bool AnyDecls = false;
1937 for (; I != E; ++I) {
1938 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1939
1940 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1941 case CSFC_Failure: return CSFC_Failure;
1942 case CSFC_FallThrough:
1943 // A fallthrough result means that the statement was simple and just
1944 // included in ResultStmt, keep adding them afterwards.
1945 break;
1946 case CSFC_Success:
1947 // A successful result means that we found the break statement and
1948 // stopped statement inclusion. We just ensure that any leftover stmts
1949 // are skippable and return success ourselves.
1950 for (++I; I != E; ++I)
1951 if (CodeGenFunction::ContainsLabel(*I, true))
1952 return CSFC_Failure;
1953 return CSFC_Success;
1954 }
1955 }
1956
1957 // If we're about to fall out of a scope without hitting a 'break;', we
1958 // can't perform the optimization if there were any decls in that scope
1959 // (we'd lose their end-of-lifetime).
1960 if (AnyDecls) {
1961 // If the entire compound statement was live, there's one more thing we
1962 // can try before giving up: emit the whole thing as a single statement.
1963 // We can do that unless the statement contains a 'break;'.
1964 // FIXME: Such a break must be at the end of a construct within this one.
1965 // We could emit this by just ignoring the BreakStmts entirely.
1966 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1967 ResultStmts.resize(StartSize);
1968 ResultStmts.push_back(S);
1969 } else {
1970 return CSFC_Failure;
1971 }
1972 }
1973
1974 return CSFC_FallThrough;
1975 }
1976
1977 // Okay, this is some other statement that we don't handle explicitly, like a
1978 // for statement or increment etc. If we are skipping over this statement,
1979 // just verify it doesn't have labels, which would make it invalid to elide.
1980 if (Case) {
1981 if (CodeGenFunction::ContainsLabel(S, true))
1982 return CSFC_Failure;
1983 return CSFC_Success;
1984 }
1985
1986 // Otherwise, we want to include this statement. Everything is cool with that
1987 // so long as it doesn't contain a break out of the switch we're in.
1988 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1989
1990 // Otherwise, everything is great. Include the statement and tell the caller
1991 // that we fall through and include the next statement as well.
1992 ResultStmts.push_back(S);
1993 return CSFC_FallThrough;
1994}
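// Illustration (not part of this file), mirroring the doc comment above:
// for 'switch (5)' the walk collects the two marked statements and succeeds
// at the 'break'; a label inside the skipped region, or a skipped
// declaration the kept statements might reference, yields CSFC_Failure.
//
//   switch (5) {
//   case 5:
//     printf("five\n");  // collected
//     ++i;               // collected
//     break;             // CSFC_Success
//   case 6:
//     puts("dead");      // elided entirely
//   }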
1995
1996/// FindCaseStatementsForValue - Find the case statement being jumped to and
1997/// then invoke CollectStatementsForCase to find the list of statements to emit
1998/// for a switch on constant. See the comment above CollectStatementsForCase
1999/// for more details.
2000static bool FindCaseStatementsForValue(const SwitchStmt &S,
2001 const llvm::APSInt &ConstantCondValue,
2002 SmallVectorImpl<const Stmt*> &ResultStmts,
2003 ASTContext &C,
2004 const SwitchCase *&ResultCase) {
2005 // First step, find the switch case that is being branched to. We can do this
2006 // efficiently by scanning the SwitchCase list.
2007 const SwitchCase *Case = S.getSwitchCaseList();
2008 const DefaultStmt *DefaultCase = nullptr;
2009
2010 for (; Case; Case = Case->getNextSwitchCase()) {
2011 // It's either a default or case. Just remember the default statement in
2012 // case we're not jumping to any numbered cases.
2013 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2014 DefaultCase = DS;
2015 continue;
2016 }
2017
2018 // Check to see if this case is the one we're looking for.
2019 const CaseStmt *CS = cast<CaseStmt>(Case);
2020 // Don't handle case ranges yet.
2021 if (CS->getRHS()) return false;
2022
2023 // If we found our case, remember it as 'case'.
2024 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2025 break;
2026 }
2027
2028 // If we didn't find a matching case, we use a default if it exists, or we
2029 // elide the whole switch body!
2030 if (!Case) {
2031 // It is safe to elide the body of the switch if it doesn't contain labels
2032 // etc. If it is safe, return successfully with an empty ResultStmts list.
2033 if (!DefaultCase)
2034 return !CodeGenFunction::ContainsLabel(&S);
2035 Case = DefaultCase;
2036 }
2037
2038 // Ok, we know which case is being jumped to, try to collect all the
2039 // statements that follow it. This can fail for a variety of reasons. Also,
2040 // check to see that the recursive walk actually found our case statement.
2041 // Insane cases like this can fail to find it in the recursive walk since we
2042 // don't handle every stmt kind:
2043 // switch (4) {
2044 // while (1) {
2045 // case 4: ...
2046 bool FoundCase = false;
2047 ResultCase = Case;
2048 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2049 ResultStmts) != CSFC_Failure &&
2050 FoundCase;
2051}
2052
2053static std::optional<SmallVector<uint64_t, 16>>
2054getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2055 // Are there enough branches to weight them?
2056 if (Likelihoods.size() <= 1)
2057 return std::nullopt;
2058
2059 uint64_t NumUnlikely = 0;
2060 uint64_t NumNone = 0;
2061 uint64_t NumLikely = 0;
2062 for (const auto LH : Likelihoods) {
2063 switch (LH) {
2064 case Stmt::LH_Unlikely:
2065 ++NumUnlikely;
2066 break;
2067 case Stmt::LH_None:
2068 ++NumNone;
2069 break;
2070 case Stmt::LH_Likely:
2071 ++NumLikely;
2072 break;
2073 }
2074 }
2075
2076 // Is there a likelihood attribute used?
2077 if (NumUnlikely == 0 && NumLikely == 0)
2078 return std::nullopt;
2079
2080 // When multiple cases share the same code they can be combined during
2081 // optimization. In that case the weights of the branch will be the sum of
2082 // the individual weights. Make sure the combined sum of all neutral cases
2083 // doesn't exceed the value of a single likely attribute.
2084 // The additions both avoid divisions by 0 and make sure the weights of None
2085 // don't exceed the weight of Likely.
2086 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2087 const uint64_t None = Likely / (NumNone + 1);
2088 const uint64_t Unlikely = 0;
2089
2090 SmallVector<uint64_t, 16> Result;
2091 Result.reserve(Likelihoods.size());
2092 for (const auto LH : Likelihoods) {
2093 switch (LH) {
2094 case Stmt::LH_Unlikely:
2095 Result.push_back(Unlikely);
2096 break;
2097 case Stmt::LH_None:
2098 Result.push_back(None);
2099 break;
2100 case Stmt::LH_Likely:
2101 Result.push_back(Likely);
2102 break;
2103 }
2104 }
2105
2106 return Result;
2107}
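// Worked example (not in the source): for a branch vector of
// {LH_None (default), LH_Likely, LH_None}, NumLikely = 1 and NumNone = 2,
// so Likely = INT32_MAX / 3 = 715827882 and None = Likely / 3 = 238609294.
// The emitted !prof weights are {238609294, 715827882, 238609294}; an
// [[unlikely]] case would get weight 0.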
2108
2109void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2110 // Handle nested switch statements.
2111 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2112 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2113 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2114 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2115
2116 // See if we can constant fold the condition of the switch and therefore only
2117 // emit the live case statement (if any) of the switch.
2118 llvm::APSInt ConstantCondValue;
2119 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2120 SmallVector<const Stmt*, 4> CaseStmts;
2121 const SwitchCase *Case = nullptr;
2122 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2123 getContext(), Case)) {
2124 if (Case)
2125 incrementProfileCounter(Case);
2126 RunCleanupsScope ExecutedScope(*this);
2127
2128 if (S.getInit())
2129 EmitStmt(S.getInit());
2130
2131 // Emit the condition variable if needed inside the entire cleanup scope
2132 // used by this special case for constant folded switches.
2133 if (S.getConditionVariable())
2134 EmitDecl(*S.getConditionVariable());
2135
2136 // At this point, we are no longer "within" a switch instance, so
2137 // we can temporarily enforce this to ensure that any embedded case
2138 // statements are not emitted.
2139 SwitchInsn = nullptr;
2140
2141 // Okay, we can dead code eliminate everything except this case. Emit the
2142 // specified series of statements and we're good.
2143 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2144 EmitStmt(CaseStmts[i]);
2145 incrementProfileCounter(&S);
2146
2147 // Now we want to restore the saved switch instance so that nested
2148 // switches continue to function properly
2149 SwitchInsn = SavedSwitchInsn;
2150
2151 return;
2152 }
2153 }
2154
2155 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2156
2157 RunCleanupsScope ConditionScope(*this);
2158
2159 if (S.getInit())
2160 EmitStmt(S.getInit());
2161
2162 if (S.getConditionVariable())
2163 EmitDecl(*S.getConditionVariable());
2164 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2165
2166 // Create basic block to hold stuff that comes after switch
2167 // statement. We also need to create a default block now so that
2168 // explicit case ranges tests can have a place to jump to on
2169 // failure.
2170 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2171 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2172 if (PGO.haveRegionCounts()) {
2173 // Walk the SwitchCase list to find how many there are.
2174 uint64_t DefaultCount = 0;
2175 unsigned NumCases = 0;
2176 for (const SwitchCase *Case = S.getSwitchCaseList();
2177 Case;
2178 Case = Case->getNextSwitchCase()) {
2179 if (isa<DefaultStmt>(Case))
2180 DefaultCount = getProfileCount(Case);
2181 NumCases += 1;
2182 }
2183 SwitchWeights = new SmallVector<uint64_t, 16>();
2184 SwitchWeights->reserve(NumCases);
2185 // The default needs to be first. We store the edge count, so we already
2186 // know the right weight.
2187 SwitchWeights->push_back(DefaultCount);
2188 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2189 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2190 // Initialize the default case.
2191 SwitchLikelihood->push_back(Stmt::LH_None);
2192 }
2193
2194 CaseRangeBlock = DefaultBlock;
2195
2196 // Clear the insertion point to indicate we are in unreachable code.
2197 Builder.ClearInsertionPoint();
2198
2199 // All break statements jump to SwitchExit. If BreakContinueStack is
2200 // non-empty, reuse the enclosing loop's ContinueBlock for 'continue'.
2201 JumpDest OuterContinue;
2202 if (!BreakContinueStack.empty())
2203 OuterContinue = BreakContinueStack.back().ContinueBlock;
2204
2205 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2206
2207 // Emit switch body.
2208 EmitStmt(S.getBody());
2209
2210 BreakContinueStack.pop_back();
2211
2212 // Update the default block in case explicit case range tests have
2213 // been chained on top.
2214 SwitchInsn->setDefaultDest(CaseRangeBlock);
2215
2216 // If a default was never emitted:
2217 if (!DefaultBlock->getParent()) {
2218 // If we have cleanups, emit the default block so that there's a
2219 // place to jump through the cleanups from.
2220 if (ConditionScope.requiresCleanups()) {
2221 EmitBlock(DefaultBlock);
2222
2223 // Otherwise, just forward the default block to the switch end.
2224 } else {
2225 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2226 delete DefaultBlock;
2227 }
2228 }
2229
2230 ConditionScope.ForceCleanup();
2231
2232 // Emit continuation.
2233 EmitBlock(SwitchExit.getBlock(), true);
2234 incrementProfileCounter(&S);
2235
2236 // If the switch has a condition wrapped by __builtin_unpredictable,
2237 // create metadata that specifies that the switch is unpredictable.
2238 // Don't bother if not optimizing because that metadata would not be used.
2239 auto *Call = dyn_cast<CallExpr>(S.getCond());
2240 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2241 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2242 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2243 llvm::MDBuilder MDHelper(getLLVMContext());
2244 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2245 MDHelper.createUnpredictable());
2246 }
2247 }
2248
2249 if (SwitchWeights) {
2250 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2251 "switch weights do not match switch cases");
2252 // If there's only one jump destination there's no sense weighting it.
2253 if (SwitchWeights->size() > 1)
2254 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2255 createProfileWeights(*SwitchWeights));
2256 delete SwitchWeights;
2257 } else if (SwitchLikelihood) {
2258 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2259 "switch likelihoods do not match switch cases");
2260 std::optional<SmallVector<uint64_t, 16>> LHW =
2261 getLikelihoodWeights(*SwitchLikelihood);
2262 if (LHW) {
2263 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2264 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2265 createProfileWeights(*LHW));
2266 }
2267 delete SwitchLikelihood;
2268 }
2269 SwitchInsn = SavedSwitchInsn;
2270 SwitchWeights = SavedSwitchWeights;
2271 SwitchLikelihood = SavedSwitchLikelihood;
2272 CaseRangeBlock = SavedCRBlock;
2273}
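// Illustration (not part of this file): wrapping a switch condition as in
//
//   switch (__builtin_unpredictable(x & 3)) { ... }
//
// causes the code above (when optimizing) to attach !unpredictable metadata
// to the switch instruction, steering the backend away from lowerings that
// depend on branch prediction.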
2274
2275static std::string
2276SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2277 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2278 std::string Result;
2279
2280 while (*Constraint) {
2281 switch (*Constraint) {
2282 default:
2283 Result += Target.convertConstraint(Constraint);
2284 break;
2285 // Ignore these
2286 case '*':
2287 case '?':
2288 case '!':
2289 case '=': // Will see this and the following in multi-alternative constraints.
2290 case '+':
2291 break;
2292 case '#': // Ignore the rest of the constraint alternative.
2293 while (Constraint[1] && Constraint[1] != ',')
2294 Constraint++;
2295 break;
2296 case '&':
2297 case '%':
2298 Result += *Constraint;
2299 while (Constraint[1] && Constraint[1] == *Constraint)
2300 Constraint++;
2301 break;
2302 case ',':
2303 Result += "|";
2304 break;
2305 case 'g':
2306 Result += "imr";
2307 break;
2308 case '[': {
2309 assert(OutCons &&
2310 "Must pass output names to constraints with a symbolic name");
2311 unsigned Index;
2312 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2313 assert(result && "Could not resolve symbolic name"); (void)result;
2314 Result += llvm::utostr(Index);
2315 break;
2316 }
2317 }
2318
2319 Constraint++;
2320 }
2321
2322 return Result;
2323}
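// Illustration (not part of this file): for an output operand written with
// the GCC constraint "=&r,g" (two alternatives), this function receives
// "&r,g" (the caller skips the '=') and produces "&r|imr": '&' is kept,
// ',' becomes '|', and 'g' expands to "imr", with anything else routed
// through Target.convertConstraint().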
2324
2325/// AddVariableConstraints - Look at AsmExpr, and if it is a variable
2326/// declared as using a particular register, add that as a constraint that
2327/// will be used in this asm stmt.
2328static std::string
2329AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2330 const TargetInfo &Target, CodeGenModule &CGM,
2331 const AsmStmt &Stmt, const bool EarlyClobber,
2332 std::string *GCCReg = nullptr) {
2333 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2334 if (!AsmDeclRef)
2335 return Constraint;
2336 const ValueDecl &Value = *AsmDeclRef->getDecl();
2337 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2338 if (!Variable)
2339 return Constraint;
2340 if (Variable->getStorageClass() != SC_Register)
2341 return Constraint;
2342 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2343 if (!Attr)
2344 return Constraint;
2345 StringRef Register = Attr->getLabel();
2346 assert(Target.isValidGCCRegisterName(Register));
2347 // We're using validateOutputConstraint here because we only care if
2348 // this is a register constraint.
2349 TargetInfo::ConstraintInfo Info(Constraint, "");
2350 if (Target.validateOutputConstraint(Info) &&
2351 !Info.allowsRegister()) {
2352 CGM.ErrorUnsupported(&Stmt, "__asm__");
2353 return Constraint;
2354 }
2355 // Canonicalize the register here before returning it.
2356 Register = Target.getNormalizedGCCRegisterName(Register);
2357 if (GCCReg != nullptr)
2358 *GCCReg = Register.str();
2359 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2360}
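// Illustration (not part of this file; the x86 register name is only an
// example): for a local register variable bound to a physical register,
//
//   register int v asm("eax");
//   asm("..." : "=r"(v));
//
// the operand's "r" constraint is rewritten to "{eax}" ("&{eax}" under
// earlyclobber), forcing LLVM to allocate exactly that register.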
2361
2362std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2363 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2364 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2365 if (Info.allowsRegister() || !Info.allowsMemory()) {
2366 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2367 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2368
2369 llvm::Type *Ty = ConvertType(InputType);
2370 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2371 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2372 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2373 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2374
2375 return {
2376 Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
2377 nullptr};
2378 }
2379 }
2380
2381 Address Addr = InputValue.getAddress(*this);
2382 ConstraintStr += '*';
2383 return {InputValue.getPointer(*this), Addr.getElementType()};
2384}
2385
2386std::pair<llvm::Value *, llvm::Type *>
2387CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2388 const Expr *InputExpr,
2389 std::string &ConstraintStr) {
2390 // If this can't be a register or memory, i.e., has to be a constant
2391 // (immediate or symbolic), try to emit it as such.
2392 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2393 if (Info.requiresImmediateConstant()) {
2394 Expr::EvalResult EVResult;
2395 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2396
2397 llvm::APSInt IntResult;
2398 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2399 getContext()))
2400 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2401 }
2402
2403 Expr::EvalResult Result;
2404 if (InputExpr->EvaluateAsInt(Result, getContext()))
2405 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2406 nullptr};
2407 }
2408
2409 if (Info.allowsRegister() || !Info.allowsMemory())
2410 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2411 return {EmitScalarExpr(InputExpr), nullptr};
2412 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2413 return {EmitScalarExpr(InputExpr), nullptr};
2414 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2415 LValue Dest = EmitLValue(InputExpr);
2416 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2417 InputExpr->getExprLoc());
2418}
2419
2420/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2421/// asm call instruction. The !srcloc MDNode contains a list of constant
2422/// integers which are the source locations of the start of each line in the
2423/// asm.
2424static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2425 CodeGenFunction &CGF) {
2426 SmallVector<llvm::Metadata *, 8> Locs;
2427 // Add the location of the first line to the MDNode.
2428 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2429 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2430 StringRef StrVal = Str->getString();
2431 if (!StrVal.empty()) {
2432 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2433 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2434 unsigned StartToken = 0;
2435 unsigned ByteOffset = 0;
2436
2437 // Add the location of the start of each subsequent line of the asm to the
2438 // MDNode.
2439 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2440 if (StrVal[i] != '\n') continue;
2441 SourceLocation LineLoc = Str->getLocationOfByte(
2442 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2443 Locs.push_back(llvm::ConstantAsMetadata::get(
2444 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2445 }
2446 }
2447
2448 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2449}
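// Illustration (not part of this file): for asm("nop\n\tnop\n\tnop") the
// !srcloc node carries three entries -- the start of the string plus the
// byte after each '\n' -- so backend diagnostics can point at the offending
// asm line instead of the whole statement.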
2450
2451static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2452 bool HasUnwindClobber, bool ReadOnly,
2453 bool ReadNone, bool NoMerge, const AsmStmt &S,
2454 const std::vector<llvm::Type *> &ResultRegTypes,
2455 const std::vector<llvm::Type *> &ArgElemTypes,
2456 CodeGenFunction &CGF,
2457 std::vector<llvm::Value *> &RegResults) {
2458 if (!HasUnwindClobber)
2459 Result.addFnAttr(llvm::Attribute::NoUnwind);
2460
2461 if (NoMerge)
2462 Result.addFnAttr(llvm::Attribute::NoMerge);
2463 // Attach readnone and readonly attributes.
2464 if (!HasSideEffect) {
2465 if (ReadNone)
2466 Result.setDoesNotAccessMemory();
2467 else if (ReadOnly)
2468 Result.setOnlyReadsMemory();
2469 }
2470
2471 // Add elementtype attribute for indirect constraints.
2472 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2473 if (Pair.value()) {
2474 auto Attr = llvm::Attribute::get(
2475 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2476 Result.addParamAttr(Pair.index(), Attr);
2477 }
2478 }
2479
2480 // Slap the source location of the inline asm into a !srcloc metadata on the
2481 // call.
2482 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2483 Result.setMetadata("srcloc",
2484 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2485 else {
2486 // At least put the line number on MS inline asm blobs.
2487 llvm::Constant *Loc =
2488 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2489 Result.setMetadata("srcloc",
2490 llvm::MDNode::get(CGF.getLLVMContext(),
2491 llvm::ConstantAsMetadata::get(Loc)));
2492 }
2493
2494 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2495 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2496 // convergent (meaning, they may call an intrinsically convergent op, such
2497 // as bar.sync, and so can't have certain optimizations applied around
2498 // them).
2499 Result.addFnAttr(llvm::Attribute::Convergent);
2500 // Extract all of the register value results from the asm.
2501 if (ResultRegTypes.size() == 1) {
2502 RegResults.push_back(&Result);
2503 } else {
2504 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2505 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2506 RegResults.push_back(Tmp);
2507 }
2508 }
2509}
2510
2511static void
2512EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2513 const llvm::ArrayRef<llvm::Value *> RegResults,
2514 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2515 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2516 const llvm::ArrayRef<LValue> ResultRegDests,
2517 const llvm::ArrayRef<QualType> ResultRegQualTys,
2518 const llvm::BitVector &ResultTypeRequiresCast,
2519 const llvm::BitVector &ResultRegIsFlagReg) {
2520 CGBuilderTy &Builder = CGF.Builder;
2521 CodeGenModule &CGM = CGF.CGM;
2522 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2523
2524 assert(RegResults.size() == ResultRegTypes.size());
2525 assert(RegResults.size() == ResultTruncRegTypes.size());
2526 assert(RegResults.size() == ResultRegDests.size());
2527 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2528 // in which case its size may grow.
2529 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2530 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2531
2532 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2533 llvm::Value *Tmp = RegResults[i];
2534 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2535
2536 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2537 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2538 // value.
2539 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2540 llvm::Value *IsBooleanValue =
2541 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2542 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2543 Builder.CreateCall(FnAssume, IsBooleanValue);
2544 }
2545
2546 // If the result type of the LLVM IR asm doesn't match the result type of
2547 // the expression, do the conversion.
2548 if (ResultRegTypes[i] != TruncTy) {
2549
2550 // Truncate the integer result to the right size, note that TruncTy can be
2551 // a pointer.
2552 if (TruncTy->isFloatingPointTy())
2553 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2554 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2555 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2556 Tmp = Builder.CreateTrunc(
2557 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2558 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2559 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2560 uint64_t TmpSize =
2561 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2562 Tmp = Builder.CreatePtrToInt(
2563 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2564 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2565 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2566 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2567 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2568 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2569 }
2570 }
2571
2572 LValue Dest = ResultRegDests[i];
2573 // ResultTypeRequiresCast elements correspond to the first
2574 // ResultTypeRequiresCast.size() elements of RegResults.
2575 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2576 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2577 Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
2578 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2579 Builder.CreateStore(Tmp, A);
2580 continue;
2581 }
2582
2583 QualType Ty =
2584 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2585 if (Ty.isNull()) {
2586 const Expr *OutExpr = S.getOutputExpr(i);
2587 CGM.getDiags().Report(OutExpr->getExprLoc(),
2588 diag::err_store_value_to_reg);
2589 return;
2590 }
2591 Dest = CGF.MakeAddrLValue(A, Ty);
2592 }
2593 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2594 }
2595}
2596
2597static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2598 const AsmStmt &S) {
2599 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2600
2601 StringRef Asm;
2602 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2603 Asm = GCCAsm->getAsmString()->getString();
2604
2605 auto &Ctx = CGF->CGM.getLLVMContext();
2606
2607 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2608 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2609 {StrTy->getType()}, false);
2610 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2611
2612 CGF->Builder.CreateCall(UBF, {StrTy});
2613}
2614
2615void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2616 // Pop all cleanup blocks at the end of the asm statement.
2617 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2618
2619 // Assemble the final asm string.
2620 std::string AsmString = S.generateAsmString(getContext());
2621
2622 // Get all the output and input constraints together.
2623 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2624 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2625
2626 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2627 bool IsValidTargetAsm = true;
2628 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2629 StringRef Name;
2630 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2631 Name = GAS->getOutputName(i);
2632 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2633 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2634 if (IsHipStdPar && !IsValid)
2635 IsValidTargetAsm = false;
2636 else
2637 assert(IsValid && "Failed to parse output constraint");
2638 OutputConstraintInfos.push_back(Info);
2639 }
2640
2641 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2642 StringRef Name;
2643 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2644 Name = GAS->getInputName(i);
2645 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2646 bool IsValid =
2647 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2648 if (IsHipStdPar && !IsValid)
2649 IsValidTargetAsm = false;
2650 else
2651 assert(IsValid && "Failed to parse input constraint");
2652 InputConstraintInfos.push_back(Info);
2653 }
2654
2655 if (!IsValidTargetAsm)
2656 return EmitHipStdParUnsupportedAsm(this, S);
2657
2658 std::string Constraints;
2659
2660 std::vector<LValue> ResultRegDests;
2661 std::vector<QualType> ResultRegQualTys;
2662 std::vector<llvm::Type *> ResultRegTypes;
2663 std::vector<llvm::Type *> ResultTruncRegTypes;
2664 std::vector<llvm::Type *> ArgTypes;
2665 std::vector<llvm::Type *> ArgElemTypes;
2666 std::vector<llvm::Value*> Args;
2667 llvm::BitVector ResultTypeRequiresCast;
2668 llvm::BitVector ResultRegIsFlagReg;
2669
2670 // Keep track of inout constraints.
2671 std::string InOutConstraints;
2672 std::vector<llvm::Value*> InOutArgs;
2673 std::vector<llvm::Type*> InOutArgTypes;
2674 std::vector<llvm::Type*> InOutArgElemTypes;
2675
2676 // Keep track of out constraints for tied input operand.
2677 std::vector<std::string> OutputConstraints;
2678
2679 // Keep track of defined physregs.
2680 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2681
2682 // An inline asm can be marked readonly if it meets the following conditions:
2683 // - it doesn't have any sideeffects
2684 // - it doesn't clobber memory
2685 // - it doesn't return a value by-reference
2686 // It can be marked readnone if it doesn't have any input memory constraints
2687 // in addition to meeting the conditions listed above.
2688 bool ReadOnly = true, ReadNone = true;
2689
2690 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2691 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2692
2693 // Simplify the output constraint.
2694 std::string OutputConstraint(S.getOutputConstraint(i));
2695 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2696 getTarget(), &OutputConstraintInfos);
2697
2698 const Expr *OutExpr = S.getOutputExpr(i);
2699 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2700
2701 std::string GCCReg;
2702 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2703 getTarget(), CGM, S,
2704 Info.earlyClobber(),
2705 &GCCReg);
2706 // Give an error on multiple outputs to same physreg.
2707 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2708 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2709
2710 OutputConstraints.push_back(OutputConstraint);
2711 LValue Dest = EmitLValue(OutExpr);
2712 if (!Constraints.empty())
2713 Constraints += ',';
2714
2715 // If this is a register output, then make the inline asm return it
2716 // by-value. If this is a memory result, return the value by-reference.
2717 QualType QTy = OutExpr->getType();
2718 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2719 hasAggregateEvaluationKind(QTy);
2720 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2721
2722 Constraints += "=" + OutputConstraint;
2723 ResultRegQualTys.push_back(QTy);
2724 ResultRegDests.push_back(Dest);
2725
2726 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2727 ResultRegIsFlagReg.push_back(IsFlagReg);
2728
2729 llvm::Type *Ty = ConvertTypeForMem(QTy);
2730 const bool RequiresCast = Info.allowsRegister() &&
2731 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2732 Ty->isAggregateType());
2733
2734 ResultTruncRegTypes.push_back(Ty);
2735 ResultTypeRequiresCast.push_back(RequiresCast);
2736
2737 if (RequiresCast) {
2738 unsigned Size = getContext().getTypeSize(QTy);
2739 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2740 }
2741 ResultRegTypes.push_back(Ty);
2742 // If this output is tied to an input, and if the input is larger, then
2743 // we need to set the actual result type of the inline asm node to be the
2744 // same as the input type.
2745 if (Info.hasMatchingInput()) {
2746 unsigned InputNo;
2747 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2748 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2749 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2750 break;
2751 }
2752 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2753
2754 QualType InputTy = S.getInputExpr(InputNo)->getType();
2755 QualType OutputType = OutExpr->getType();
2756
2757 uint64_t InputSize = getContext().getTypeSize(InputTy);
2758 if (getContext().getTypeSize(OutputType) < InputSize) {
2759 // Form the asm to return the value as a larger integer or fp type.
2760 ResultRegTypes.back() = ConvertType(InputTy);
2761 }
2762 }
2763 if (llvm::Type* AdjTy =
2764 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2765 ResultRegTypes.back()))
2766 ResultRegTypes.back() = AdjTy;
2767 else {
2768 CGM.getDiags().Report(S.getAsmLoc(),
2769 diag::err_asm_invalid_type_in_input)
2770 << OutExpr->getType() << OutputConstraint;
2771 }
2772
2773 // Update largest vector width for any vector types.
2774 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2775 LargestVectorWidth =
2776 std::max((uint64_t)LargestVectorWidth,
2777 VT->getPrimitiveSizeInBits().getKnownMinValue());
2778 } else {
2779 Address DestAddr = Dest.getAddress(*this);
2780 // Matrix types in memory are represented by arrays, but accessed through
2781 // vector pointers, with the alignment specified on the access operation.
2782 // For inline assembly, update pointer arguments to use vector pointers.
2783 // Otherwise there will be a mis-match if the matrix is also an
2784 // input-argument which is represented as vector.
2785 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2786 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2787
2788 ArgTypes.push_back(DestAddr.getType());
2789 ArgElemTypes.push_back(DestAddr.getElementType());
2790 Args.push_back(DestAddr.emitRawPointer(*this));
2791 Constraints += "=*";
2792 Constraints += OutputConstraint;
2793 ReadOnly = ReadNone = false;
2794 }
2795
2796 if (Info.isReadWrite()) {
2797 InOutConstraints += ',';
2798
2799 const Expr *InputExpr = S.getOutputExpr(i);
2800 llvm::Value *Arg;
2801 llvm::Type *ArgElemType;
2802 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2803 Info, Dest, InputExpr->getType(), InOutConstraints,
2804 InputExpr->getExprLoc());
2805
2806 if (llvm::Type* AdjTy =
2807 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2808 Arg->getType()))
2809 Arg = Builder.CreateBitCast(Arg, AdjTy);
2810
2811 // Update largest vector width for any vector types.
2812 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2813 LargestVectorWidth =
2814 std::max((uint64_t)LargestVectorWidth,
2815 VT->getPrimitiveSizeInBits().getKnownMinValue());
2816 // Only tie earlyclobber physregs.
2817 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2818 InOutConstraints += llvm::utostr(i);
2819 else
2820 InOutConstraints += OutputConstraint;
2821
2822 InOutArgTypes.push_back(Arg->getType());
2823 InOutArgElemTypes.push_back(ArgElemType);
2824 InOutArgs.push_back(Arg);
2825 }
2826 }
2827
2828 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2829 // to the return value slot. Only do this when returning in registers.
2830 if (isa<MSAsmStmt>(&S)) {
2831 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2832 if (RetAI.isDirect() || RetAI.isExtend()) {
2833 // Make a fake lvalue for the return value slot.
2834 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2835 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2836 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2837 ResultRegDests, AsmString, S.getNumOutputs());
2838 SawAsmBlock = true;
2839 }
2840 }
2841
2842 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2843 const Expr *InputExpr = S.getInputExpr(i);
2844
2845 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2846
2847 if (Info.allowsMemory())
2848 ReadNone = false;
2849
2850 if (!Constraints.empty())
2851 Constraints += ',';
2852
2853 // Simplify the input constraint.
2854 std::string InputConstraint(S.getInputConstraint(i));
2855 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2856 &OutputConstraintInfos);
2857
2858 InputConstraint = AddVariableConstraints(
2859 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2860 getTarget(), CGM, S, false /* No EarlyClobber */);
2861
2862 std::string ReplaceConstraint (InputConstraint);
2863 llvm::Value *Arg;
2864 llvm::Type *ArgElemType;
2865 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2866
2867 // If this input argument is tied to a larger output result, extend the
2868 // input to be the same size as the output. The LLVM backend wants to see
2869 // the input and output of a matching constraint be the same size. Note
2870 // that GCC does not define what the top bits are here. We use zext because
2871 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2872 if (Info.hasTiedOperand()) {
2873 unsigned Output = Info.getTiedOperand();
2874 QualType OutputType = S.getOutputExpr(Output)->getType();
2875 QualType InputTy = InputExpr->getType();
2876
2877 if (getContext().getTypeSize(OutputType) >
2878 getContext().getTypeSize(InputTy)) {
2879 // Use ptrtoint as appropriate so that we can do our extension.
2880 if (isa<llvm::PointerType>(Arg->getType()))
2881 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2882 llvm::Type *OutputTy = ConvertType(OutputType);
2883 if (isa<llvm::IntegerType>(OutputTy))
2884 Arg = Builder.CreateZExt(Arg, OutputTy);
2885 else if (isa<llvm::PointerType>(OutputTy))
2886 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2887 else if (OutputTy->isFloatingPointTy())
2888 Arg = Builder.CreateFPExt(Arg, OutputTy);
2889 }
2890 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2891 ReplaceConstraint = OutputConstraints[Output];
2892 }
2893 if (llvm::Type* AdjTy =
2894 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2895 Arg->getType()))
2896 Arg = Builder.CreateBitCast(Arg, AdjTy);
2897 else
2898 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2899 << InputExpr->getType() << InputConstraint;
2900
2901 // Update largest vector width for any vector types.
2902 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2903 LargestVectorWidth =
2904 std::max((uint64_t)LargestVectorWidth,
2905 VT->getPrimitiveSizeInBits().getKnownMinValue());
2906
2907 ArgTypes.push_back(Arg->getType());
2908 ArgElemTypes.push_back(ArgElemType);
2909 Args.push_back(Arg);
2910 Constraints += InputConstraint;
2911 }
2912
2913 // Append the "input" part of inout constraints.
2914 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2915 ArgTypes.push_back(InOutArgTypes[i]);
2916 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2917 Args.push_back(InOutArgs[i]);
2918 }
2919 Constraints += InOutConstraints;
2920
2921 // Labels
2922 SmallVector<llvm::BasicBlock *, 16> Transfer;
2923 llvm::BasicBlock *Fallthrough = nullptr;
2924 bool IsGCCAsmGoto = false;
2925 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2926 IsGCCAsmGoto = GS->isAsmGoto();
2927 if (IsGCCAsmGoto) {
2928 for (const auto *E : GS->labels()) {
2929 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2930 Transfer.push_back(Dest.getBlock());
2931 if (!Constraints.empty())
2932 Constraints += ',';
2933 Constraints += "!i";
2934 }
2935 Fallthrough = createBasicBlock("asm.fallthrough");
2936 }
2937 }
2938
2939 bool HasUnwindClobber = false;
2940
2941 // Clobbers
2942 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2943 StringRef Clobber = S.getClobber(i);
2944
2945 if (Clobber == "memory")
2946 ReadOnly = ReadNone = false;
2947 else if (Clobber == "unwind") {
2948 HasUnwindClobber = true;
2949 continue;
2950 } else if (Clobber != "cc") {
2951 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2952 if (CGM.getCodeGenOpts().StackClashProtector &&
2953 getTarget().isSPRegName(Clobber)) {
2954 CGM.getDiags().Report(S.getAsmLoc(),
2955 diag::warn_stack_clash_protection_inline_asm);
2956 }
2957 }
2958
2959 if (isa<MSAsmStmt>(&S)) {
2960 if (Clobber == "eax" || Clobber == "edx") {
2961 if (Constraints.find("=&A") != std::string::npos)
2962 continue;
2963 std::string::size_type position1 =
2964 Constraints.find("={" + Clobber.str() + "}");
2965 if (position1 != std::string::npos) {
2966 Constraints.insert(position1 + 1, "&");
2967 continue;
2968 }
2969 std::string::size_type position2 = Constraints.find("=A");
2970 if (position2 != std::string::npos) {
2971 Constraints.insert(position2 + 1, "&");
2972 continue;
2973 }
2974 }
2975 }
2976 if (!Constraints.empty())
2977 Constraints += ',';
2978
2979 Constraints += "~{";
2980 Constraints += Clobber;
2981 Constraints += '}';
2982 }
2983
2984 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2985 "unwind clobber can't be used with asm goto");
2986
2987 // Add machine specific clobbers
2988 std::string_view MachineClobbers = getTarget().getClobbers();
2989 if (!MachineClobbers.empty()) {
2990 if (!Constraints.empty())
2991 Constraints += ',';
2992 Constraints += MachineClobbers;
2993 }
2994
2995 llvm::Type *ResultType;
2996 if (ResultRegTypes.empty())
2997 ResultType = VoidTy;
2998 else if (ResultRegTypes.size() == 1)
2999 ResultType = ResultRegTypes[0];
3000 else
3001 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3002
3003 llvm::FunctionType *FTy =
3004 llvm::FunctionType::get(ResultType, ArgTypes, false);
3005
3006 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3007
3008 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3009 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3010 ? llvm::InlineAsm::AD_ATT
3011 : llvm::InlineAsm::AD_Intel;
3012 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3013 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3014
3015 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3016 FTy, AsmString, Constraints, HasSideEffect,
3017 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3018 std::vector<llvm::Value*> RegResults;
3019 llvm::CallBrInst *CBR;
3020 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3021 CBRRegResults;
3022 if (IsGCCAsmGoto) {
3023 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3024 EmitBlock(Fallthrough);
3025 UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
3026 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3027 *this, RegResults);
3028 // Because we are emitting code top to bottom, we don't have enough
3029 // information at this point to know precisely whether we have a critical
3030 // edge. If we have outputs, split all indirect destinations.
3031 if (!RegResults.empty()) {
3032 unsigned i = 0;
3033 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3034 llvm::Twine SynthName = Dest->getName() + ".split";
3035 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3036 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3037 Builder.SetInsertPoint(SynthBB);
3038
3039 if (ResultRegTypes.size() == 1) {
3040 CBRRegResults[SynthBB].push_back(CBR);
3041 } else {
3042 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3043 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3044 CBRRegResults[SynthBB].push_back(Tmp);
3045 }
3046 }
3047
3048 EmitBranch(Dest);
3049 EmitBlock(SynthBB);
3050 CBR->setIndirectDest(i++, SynthBB);
3051 }
3052 }
3053 } else if (HasUnwindClobber) {
3054 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3055 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
3056 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3057 *this, RegResults);
3058 } else {
3059 llvm::CallInst *Result =
3060 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3061 UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
3062 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3063 *this, RegResults);
3064 }
3065
3066 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3067 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3068 ResultRegIsFlagReg);
3069
3070 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3071 // different insertion point; one for each indirect destination and with
3072 // CBRRegResults rather than RegResults.
3073 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3074 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3075 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3076 Builder.SetInsertPoint(Succ, --(Succ->end()));
3077 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3078 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3079 ResultTypeRequiresCast, ResultRegIsFlagReg);
3080 }
3081 }
3082}
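// Illustration (not part of this file): an asm goto with an output, e.g.
//
//   int v;
//   asm goto("..." : "=r"(v) : : : fail);
//   // fallthrough path uses v
// fail:
//   ...
//
// takes the callbr path above: each indirect destination is split into a
// fresh ".split" block so the output stores can be re-emitted on every
// edge, with CBRRegResults supplying the per-destination values.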
3083
3084LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3085 const RecordDecl *RD = S.getCapturedRecordDecl();
3086 QualType RecordTy = getContext().getRecordType(RD);
3087
3088 // Initialize the captured struct.
3089 LValue SlotLV =
3090 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3091
3092 RecordDecl::field_iterator CurField = RD->field_begin();
3093 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3094 E = S.capture_init_end();
3095 I != E; ++I, ++CurField) {
3096 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3097 if (CurField->hasCapturedVLAType()) {
3098 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3099 } else {
3100 EmitInitializerForField(*CurField, LV, *I);
3101 }
3102 }
3103
3104 return SlotLV;
3105}
3106
3107/// Generate an outlined function for the body of a CapturedStmt, store any
3108/// captured variables into the captured struct, and call the outlined function.
3109llvm::Function *
3110CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3111 LValue CapStruct = InitCapturedStruct(S);
3112
3113 // Emit the CapturedDecl
3114 CodeGenFunction CGF(CGM, true);
3115 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3116 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3117 delete CGF.CapturedStmtInfo;
3118
3119 // Emit call to the helper function.
3120 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3121
3122 return F;
3123}
3124
3125Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3126 LValue CapStruct = InitCapturedStruct(S);
3127 return CapStruct.getAddress(*this);
3128}
3129
3130/// Creates the outlined function for a CapturedStmt.
3131llvm::Function *
3132CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3133 assert(CapturedStmtInfo &&
3134 "CapturedStmtInfo should be set when generating the captured function");
3135 const CapturedDecl *CD = S.getCapturedDecl();
3136 const RecordDecl *RD = S.getCapturedRecordDecl();
3137 SourceLocation Loc = S.getBeginLoc();
3138 assert(CD->hasBody() && "missing CapturedDecl body");
3139
3140 // Build the argument list.
3141 ASTContext &Ctx = CGM.getContext();
3142 FunctionArgList Args;
3143 Args.append(CD->param_begin(), CD->param_end());
3144
3145 // Create the function declaration.
3146 const CGFunctionInfo &FuncInfo =
3147 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3148 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3149
3150 llvm::Function *F =
3151 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3152 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3153 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3154 if (CD->isNothrow())
3155 F->addFnAttr(llvm::Attribute::NoUnwind);
3156
3157 // Generate the function.
3158 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3159 CD->getBody()->getBeginLoc());
3160 // Set the context parameter in CapturedStmtInfo.
3161 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3162 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3163
3164 // Initialize variable-length arrays.
3165 LValue Base = MakeNaturalAlignRawAddrLValue(
3166 CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
3167 for (auto *FD : RD->fields()) {
3168 if (FD->hasCapturedVLAType()) {
3169 auto *ExprArg =
3170 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3171 .getScalarVal();
3172 auto VAT = FD->getCapturedVLAType();
3173 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3174 }
3175 }
3176
3177 // If 'this' is captured, load it into CXXThisValue.
3178 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3179 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3180 LValue ThisLValue = EmitLValueForField(Base, FD);
3181 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3182 }
3183
3184 PGO.assignRegionCounters(GlobalDecl(CD), F);
3185 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3186 FinishFunction(CD->getBodyRBrace());
3187
3188 return F;
3189}
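// Illustration (not part of this file): for a captured region such as
//
//   #pragma clang __debug captured
//   { x += vla[i]; }
//
// this produces an internal helper (default name "__captured_stmt") whose
// only parameter is the capture struct built by InitCapturedStruct --
// fields for x, the VLA pointer, and the captured VLA size -- and the
// caller passes that struct's address to it.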
3190
3191namespace {
3192// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3193// nullptr otherwise.
3194llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
3195 for (auto &I : *BB) {
3196 auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
3197 if (II && llvm::isConvergenceControlIntrinsic(II->getIntrinsicID()))
3198 return II;
3199 }
3200 return nullptr;
3201}
3202
3203} // namespace
3204
3205llvm::CallBase *
3206CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
3207 llvm::Value *ParentToken) {
3208 llvm::Value *bundleArgs[] = {ParentToken};
3209 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3210 auto Output = llvm::CallBase::addOperandBundle(
3211 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input);
3212 Input->replaceAllUsesWith(Output);
3213 Input->eraseFromParent();
3214 return Output;
3215}
3216
3217llvm::IntrinsicInst *
3218CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
3219 llvm::Value *ParentToken) {
3220 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3221 if (BB->empty())
3222 Builder.SetInsertPoint(BB);
3223 else
3224 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3225
3226 llvm::CallBase *CB = Builder.CreateIntrinsic(
3227 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3228 Builder.restoreIP(IP);
3229
3230 llvm::CallBase *I = addConvergenceControlToken(CB, ParentToken);
3231 return cast<llvm::IntrinsicInst>(I);
3232}
3233
3234llvm::IntrinsicInst *
3235CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3236 llvm::BasicBlock *BB = &F->getEntryBlock();
3237 llvm::IntrinsicInst *Token = getConvergenceToken(BB);
3238 if (Token)
3239 return Token;
3240
3241 // Adding a convergence token requires the function to be marked as
3242 // convergent.
3243 F->setConvergent();
3244
3245 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3246 Builder.SetInsertPoint(&BB->front());
3247 llvm::CallBase *I = Builder.CreateIntrinsic(
3248 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3249 assert(isa<llvm::IntrinsicInst>(I));
3250 Builder.restoreIP(IP);
3251
3252 return cast<llvm::IntrinsicInst>(I);
3253}
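// Illustrative sketch, not part of this file: taken together, the helpers
// above build the token chain required by LLVM's convergence-control
// intrinsics. For a convergent call inside a loop, the emitted IR has roughly
// this shape (block and value names are hypothetical):
//
//   entry:
//     %entry.tok = call token @llvm.experimental.convergence.entry()
//     ...
//   loop.header:
//     %loop.tok = call token @llvm.experimental.convergence.loop()
//                     [ "convergencectrl"(token %entry.tok) ]
//     call void @convergent_fn() [ "convergencectrl"(token %loop.tok) ]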