//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
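// (EnableSingleByteCoverage corresponds to LLVM's -enable-single-byte-coverage
// option: counters are emitted as single-byte probes rather than 64-bit
// increments, which is why counter placement is special-cased throughout this
// file.)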

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
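/// For example, given "int x = ({ f(); 42; });", the statement expression's
/// value is that of its last sub-statement, 42.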
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
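      // For example, in ({ ...; out: 42; }) the trailing label "out" is
      // emitted first, and 42 then becomes the statement expression's value.
      // ("out" is a hypothetical label name, for illustration.)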
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

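// Fold away a block that does nothing but branch unconditionally to its
// successor, e.g. the "while.cond" header left over from a "while (1)" loop
// once the conditional branch has been elided.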
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

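// Insert 'block' into the function immediately after the block of its first
// instruction user, or at the end of the function if nothing uses it yet.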
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
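    // A [[clang::musttail]] return must be of the form
    //   [[clang::musttail]] return f(args);
    // (a return of a plain call), which is why the unchecked casts below are
    // safe once Sema has accepted the attribute.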
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


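// An indirect goto is the GNU "computed goto" extension:
//   void *addr = &&some_label; ...; goto *addr;
// All indirect gotos in a function share one dispatch block; each emission
// just adds the computed target address to that block's PHI.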
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
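  // (For "if consteval { A } else { B }", only B is emitted here; A was fully
  // handled at compile time. "if !consteval" swaps the two roles.)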
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = Else;
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (Else)
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose condition
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of the
  // following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching one
// of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(
        LoopHeader.getBlock(), ConvergenceTokenStack.back()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopBody, ConvergenceTokenStack.back()));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block.  Otherwise, if there is no condition variable, we can
  // form the continue block now.  If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
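/// (When true for the callee of a return's operand, EmitReturnStmt below marks
/// that call as musttail so the async context is forwarded without growing the
/// stack.)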
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
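/// (When requiresReturnValueCheck() holds, the statement's source location is
/// stashed in a global below so the runtime return-value check can report it.)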
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      else
        Builder.CreateStore(Ret, ReturnValue);
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
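/// (Case ranges are a GNU extension, e.g. "case 1 ... 64:".)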
1651 ArrayRef<const Attr *> Attrs) {
1652 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1653
1654 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1655 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1656
1657 // Emit the code for this case. We do this first to make sure it is
1658 // properly chained from our predecessor before generating the
1659 // switch machinery to enter this block.
1660 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1661 EmitBlockWithFallThrough(CaseDest, &S);
1662 EmitStmt(S.getSubStmt());
1663
1664 // If range is empty, do nothing.
1665 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1666 return;
1667
1669 llvm::APInt Range = RHS - LHS;
1670 // FIXME: parameters such as this should not be hardcoded.
1671 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1672 // Range is small enough to add multiple switch instruction cases.
1673 uint64_t Total = getProfileCount(&S);
1674 unsigned NCases = Range.getZExtValue() + 1;
1675 // We only have one region counter for the entire set of cases here, so we
1676 // need to divide the weights evenly between the generated cases, ensuring
1677 // that the total weight is preserved. E.g., a weight of 5 over three cases
1678 // will be distributed as weights of 2, 2, and 1.
1679 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1680 for (unsigned I = 0; I != NCases; ++I) {
1681 if (SwitchWeights)
1682 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1683 else if (SwitchLikelihood)
1684 SwitchLikelihood->push_back(LH);
1685
1686 if (Rem)
1687 Rem--;
1688 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1689 ++LHS;
1690 }
1691 return;
1692 }
1693
1694 // The range is too big. Emit "if" condition into a new block,
1695 // making sure to save and restore the current insertion point.
1696 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1697
1698 // Push this test onto the chain of range checks (which terminates
1699 // in the default basic block). The switch's default will be changed
1700 // to the top of this chain after switch emission is complete.
1701 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1702 CaseRangeBlock = createBasicBlock("sw.caserange");
1703
1704 CurFn->insert(CurFn->end(), CaseRangeBlock);
1705 Builder.SetInsertPoint(CaseRangeBlock);
1706
1707 // Emit range check.
1708 llvm::Value *Diff =
1709 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1710 llvm::Value *Cond =
1711 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
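// Illustrative note (not emitted verbatim; values are hypothetical): for a
// "case 10 ... 200:" range, the check built above has this IR shape:
//   %diff = sub i32 %cond, 10            ; switch condition minus LHS
//   %inbounds = icmp ule i32 %diff, 190  ; 190 == RHS - LHS
//   br i1 %inbounds, label %sw.bb, label %<previous range check / default>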
1712
1713 llvm::MDNode *Weights = nullptr;
1714 if (SwitchWeights) {
1715 uint64_t ThisCount = getProfileCount(&S);
1716 uint64_t DefaultCount = (*SwitchWeights)[0];
1717 Weights = createProfileWeights(ThisCount, DefaultCount);
1718
1719 // Since we're chaining the switch default through each large case range, we
1720 // need to update the weight for the default, i.e., the first case, to include
1721 // this case.
1722 (*SwitchWeights)[0] += ThisCount;
1723 } else if (SwitchLikelihood)
1724 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1725
1726 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1727
1728 // Restore the appropriate insertion point.
1729 if (RestoreBB)
1730 Builder.SetInsertPoint(RestoreBB);
1731 else
1732 Builder.ClearInsertionPoint();
1733}
1734
1735 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1736 ArrayRef<const Attr *> Attrs) {
1737 // If there is no enclosing switch instance that we're aware of, then this
1738 // case statement and its block can be elided. This situation only happens
1739 // when we've constant-folded the switch, are emitting the constant case,
1740 // and part of the constant case includes another case statement. For
1741 // instance: switch (4) { case 4: do { case 5: } while (1); }
1742 if (!SwitchInsn) {
1743 EmitStmt(S.getSubStmt());
1744 return;
1745 }
1746
1747 // Handle case ranges.
1748 if (S.getRHS()) {
1749 EmitCaseStmtRange(S, Attrs);
1750 return;
1751 }
1752
1753 llvm::ConstantInt *CaseVal =
1754 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1755
1756 // Emit debug info for the case value if it is an enum value.
1757 const ConstantExpr *CE;
1758 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1759 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1760 else
1761 CE = dyn_cast<ConstantExpr>(S.getLHS());
1762 if (CE) {
1763 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1764 if (CGDebugInfo *Dbg = getDebugInfo())
1765 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1766 Dbg->EmitGlobalVariable(DE->getDecl(),
1767 APValue(llvm::APSInt(CaseVal->getValue())));
1768 }
1769
1770 if (SwitchLikelihood)
1771 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1772
1773 // If the body of the case is just a 'break', try to not emit an empty block.
1774 // If we're profiling or we're not optimizing, leave the block in for better
1775 // debug and coverage analysis.
1776 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1777 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1778 isa<BreakStmt>(S.getSubStmt())) {
1779 JumpDest Block = BreakContinueStack.back().BreakBlock;
1780
1781 // Only do this optimization if there are no cleanups that need emitting.
1782 if (isObviouslyBranchWithoutCleanups(Block)) {
1783 if (SwitchWeights)
1784 SwitchWeights->push_back(getProfileCount(&S));
1785 SwitchInsn->addCase(CaseVal, Block.getBlock());
1786
1787 // If there was a fallthrough into this case, make sure to redirect it to
1788 // the end of the switch as well.
1789 if (Builder.GetInsertBlock()) {
1790 Builder.CreateBr(Block.getBlock());
1791 Builder.ClearInsertionPoint();
1792 }
1793 return;
1794 }
1795 }
1796
1797 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1798 EmitBlockWithFallThrough(CaseDest, &S);
1799 if (SwitchWeights)
1800 SwitchWeights->push_back(getProfileCount(&S));
1801 SwitchInsn->addCase(CaseVal, CaseDest);
1802
1803 // Recursively emitting the statement is acceptable, but is not wonderful for
1804 // code where we have many case statements nested together, i.e.:
1805 // case 1:
1806 // case 2:
1807 // case 3: etc.
1808 // Handling this recursively will create a new block for each case statement
1809 // that falls through to the next case which is IR intensive. It also causes
1810 // deep recursion which can run into stack depth limitations. Handle
1811 // sequential non-range case statements specially.
1812 //
1813 // TODO: When the next case has a likelihood attribute, the code returns to
1814 // the recursive algorithm. Maybe improve this case if it becomes common
1815 // practice to use a lot of attributes.
1816 const CaseStmt *CurCase = &S;
1817 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1818
1819 // Otherwise, iteratively add consecutive cases to this switch stmt.
1820 while (NextCase && NextCase->getRHS() == nullptr) {
1821 CurCase = NextCase;
1822 llvm::ConstantInt *CaseVal =
1823 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1824
1825 if (SwitchWeights)
1826 SwitchWeights->push_back(getProfileCount(NextCase));
1827 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1828 CaseDest = createBasicBlock("sw.bb");
1829 EmitBlockWithFallThrough(CaseDest, CurCase);
1830 }
1831 // Since this loop is only executed when the CaseStmt has no attributes,
1832 // use a hard-coded value.
1833 if (SwitchLikelihood)
1834 SwitchLikelihood->push_back(Stmt::LH_None);
1835
1836 SwitchInsn->addCase(CaseVal, CaseDest);
1837 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1838 }
1839
1840 // Generate a stop point for debug info if the case statement is
1841 // followed by a default statement. A fallthrough case before a
1842 // default case gets its own branch target.
1843 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1844 EmitStopPoint(CurCase);
1845
1846 // Normal default recursion for non-cases.
1847 EmitStmt(CurCase->getSubStmt());
1848}
1849
1850 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1851 ArrayRef<const Attr *> Attrs) {
1852 // If there is no enclosing switch instance that we're aware of, then this
1853 // default statement can be elided. This situation only happens when we've
1854 // constant-folded the switch.
1855 if (!SwitchInsn) {
1856 EmitStmt(S.getSubStmt());
1857 return;
1858 }
1859
1860 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1861 assert(DefaultBlock->empty() &&
1862 "EmitDefaultStmt: Default block already defined?");
1863
1864 if (SwitchLikelihood)
1865 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1866
1867 EmitBlockWithFallThrough(DefaultBlock, &S);
1868
1869 EmitStmt(S.getSubStmt());
1870}
1871
1872/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1873/// constant value that is being switched on, see if we can dead code eliminate
1874/// the body of the switch to a simple series of statements to emit. Basically,
1875/// on a switch (5) we want to find these statements:
1876/// case 5:
1877/// printf(...); <--
1878/// ++i; <--
1879/// break;
1880///
1881/// and add them to the ResultStmts vector. If it is unsafe to do this
1882/// transformation (for example, one of the elided statements contains a label
1883/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1884/// should include statements after it (e.g. the printf() line is a substmt of
1885/// the case) then return CSFC_FallThrough. If we handled it and found a break
1886/// statement, then return CSFC_Success.
1887///
1888/// If Case is non-null, then we are looking for the specified case, checking
1889/// that nothing we jump over contains labels. If Case is null, then we found
1890/// the case and are looking for the break.
1891///
1892/// If the recursive walk actually finds our Case, then we set FoundCase to
1893/// true.
1894///
1895 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1896 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1897 const SwitchCase *Case,
1898 bool &FoundCase,
1899 SmallVectorImpl<const Stmt*> &ResultStmts) {
1900 // If this is a null statement, just succeed.
1901 if (!S)
1902 return Case ? CSFC_Success : CSFC_FallThrough;
1903
1904 // If this is the switchcase (case 4: or default) that we're looking for, then
1905 // we're in business. Just add the substatement.
1906 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1907 if (S == Case) {
1908 FoundCase = true;
1909 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1910 ResultStmts);
1911 }
1912
1913 // Otherwise, this is some other case or default statement, just ignore it.
1914 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1915 ResultStmts);
1916 }
1917
1918 // If we are in the live part of the code and we found our break statement,
1919 // return a success!
1920 if (!Case && isa<BreakStmt>(S))
1921 return CSFC_Success;
1922
1923 // If this is a compound statement, then it might contain the SwitchCase,
1924 // the break, or neither.
1925 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1926 // Handle this as two cases: we might be looking for the SwitchCase (if so
1927 // the skipped statements must be skippable) or we might already have it.
1928 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1929 bool StartedInLiveCode = FoundCase;
1930 unsigned StartSize = ResultStmts.size();
1931
1932 // If we've not found the case yet, scan through looking for it.
1933 if (Case) {
1934 // Keep track of whether we see a skipped declaration. The code could be
1935 // using the declaration even if it is skipped, so we can't optimize out
1936 // the decl if the kept statements might refer to it.
1937 bool HadSkippedDecl = false;
1938
1939 // If we're looking for the case, just see if we can skip each of the
1940 // substatements.
1941 for (; Case && I != E; ++I) {
1942 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1943
1944 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1945 case CSFC_Failure: return CSFC_Failure;
1946 case CSFC_Success:
1947 // A successful result means that either 1) the statement doesn't
1948 // have the case and is skippable, or 2) it does contain the case value
1949 // and also contains the break to exit the switch. In the latter case,
1950 // we just verify the rest of the statements are elidable.
1951 if (FoundCase) {
1952 // If we found the case and skipped declarations, we can't do the
1953 // optimization.
1954 if (HadSkippedDecl)
1955 return CSFC_Failure;
1956
1957 for (++I; I != E; ++I)
1958 if (CodeGenFunction::ContainsLabel(*I, true))
1959 return CSFC_Failure;
1960 return CSFC_Success;
1961 }
1962 break;
1963 case CSFC_FallThrough:
1964 // If we have a fallthrough condition, then we must have found the
1965 // case and started to include statements. Consider the rest of the
1966 // statements in the compound statement as candidates for inclusion.
1967 assert(FoundCase && "Didn't find case but returned fallthrough?");
1968 // We recursively found Case, so we're not looking for it anymore.
1969 Case = nullptr;
1970
1971 // If we found the case and skipped declarations, we can't do the
1972 // optimization.
1973 if (HadSkippedDecl)
1974 return CSFC_Failure;
1975 break;
1976 }
1977 }
1978
1979 if (!FoundCase)
1980 return CSFC_Success;
1981
1982 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1983 }
1984
1985 // If we have statements in our range, then we know that the statements are
1986 // live and need to be added to the set of statements we're tracking.
1987 bool AnyDecls = false;
1988 for (; I != E; ++I) {
1989 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1990
1991 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1992 case CSFC_Failure: return CSFC_Failure;
1993 case CSFC_FallThrough:
1994 // A fallthrough result means that the statement was simple and was just
1995 // included in ResultStmts; keep adding statements afterwards.
1996 break;
1997 case CSFC_Success:
1998 // A successful result means that we found the break statement and
1999 // stopped statement inclusion. We just ensure that any leftover stmts
2000 // are skippable and return success ourselves.
2001 for (++I; I != E; ++I)
2002 if (CodeGenFunction::ContainsLabel(*I, true))
2003 return CSFC_Failure;
2004 return CSFC_Success;
2005 }
2006 }
2007
2008 // If we're about to fall out of a scope without hitting a 'break;', we
2009 // can't perform the optimization if there were any decls in that scope
2010 // (we'd lose their end-of-lifetime).
2011 if (AnyDecls) {
2012 // If the entire compound statement was live, there's one more thing we
2013 // can try before giving up: emit the whole thing as a single statement.
2014 // We can do that unless the statement contains a 'break;'.
2015 // FIXME: Such a break must be at the end of a construct within this one.
2016 // We could emit this by just ignoring the BreakStmts entirely.
2017 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2018 ResultStmts.resize(StartSize);
2019 ResultStmts.push_back(S);
2020 } else {
2021 return CSFC_Failure;
2022 }
2023 }
2024
2025 return CSFC_FallThrough;
2026 }
2027
2028 // Okay, this is some other statement that we don't handle explicitly, like a
2029 // for statement or increment etc. If we are skipping over this statement,
2030 // just verify it doesn't have labels, which would make it invalid to elide.
2031 if (Case) {
2032 if (CodeGenFunction::ContainsLabel(S, true))
2033 return CSFC_Failure;
2034 return CSFC_Success;
2035 }
2036
2037 // Otherwise, we want to include this statement. Everything is cool with that
2038 // so long as it doesn't contain a break out of the switch we're in.
2039 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2040
2041 // Otherwise, everything is great. Include the statement and tell the caller
2042 // that we fall through and include the next statement as well.
2043 ResultStmts.push_back(S);
2044 return CSFC_FallThrough;
2045}
2046
2047/// FindCaseStatementsForValue - Find the case statement being jumped to and
2048/// then invoke CollectStatementsForCase to find the list of statements to emit
2049/// for a switch on constant. See the comment above CollectStatementsForCase
2050/// for more details.
2051 static bool FindCaseStatementsForValue(const SwitchStmt &S,
2052 const llvm::APSInt &ConstantCondValue,
2053 SmallVectorImpl<const Stmt*> &ResultStmts,
2054 ASTContext &C,
2055 const SwitchCase *&ResultCase) {
2056 // First step, find the switch case that is being branched to. We can do this
2057 // efficiently by scanning the SwitchCase list.
2058 const SwitchCase *Case = S.getSwitchCaseList();
2059 const DefaultStmt *DefaultCase = nullptr;
2060
2061 for (; Case; Case = Case->getNextSwitchCase()) {
2062 // It's either a default or case. Just remember the default statement in
2063 // case we're not jumping to any numbered cases.
2064 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2065 DefaultCase = DS;
2066 continue;
2067 }
2068
2069 // Check to see if this case is the one we're looking for.
2070 const CaseStmt *CS = cast<CaseStmt>(Case);
2071 // Don't handle case ranges yet.
2072 if (CS->getRHS()) return false;
2073
2074 // If we found our case, remember it as 'case'.
2075 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2076 break;
2077 }
2078
2079 // If we didn't find a matching case, we use a default if it exists, or we
2080 // elide the whole switch body!
2081 if (!Case) {
2082 // It is safe to elide the body of the switch if it doesn't contain labels
2083 // etc. If it is safe, return successfully with an empty ResultStmts list.
2084 if (!DefaultCase)
2085 return !CodeGenFunction::ContainsLabel(&S);
2086 Case = DefaultCase;
2087 }
2088
2089 // Ok, we know which case is being jumped to, try to collect all the
2090 // statements that follow it. This can fail for a variety of reasons. Also,
2091 // check to see that the recursive walk actually found our case statement.
2092 // Insane cases like this can fail to find it in the recursive walk since we
2093 // don't handle every stmt kind:
2094 // switch (4) {
2095 // while (1) {
2096 // case 4: ...
2097 bool FoundCase = false;
2098 ResultCase = Case;
2099 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2100 ResultStmts) != CSFC_Failure &&
2101 FoundCase;
2102}
2103
2104static std::optional<SmallVector<uint64_t, 16>>
2105 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2106 // Are there enough branches to weight them?
2107 if (Likelihoods.size() <= 1)
2108 return std::nullopt;
2109
2110 uint64_t NumUnlikely = 0;
2111 uint64_t NumNone = 0;
2112 uint64_t NumLikely = 0;
2113 for (const auto LH : Likelihoods) {
2114 switch (LH) {
2115 case Stmt::LH_Unlikely:
2116 ++NumUnlikely;
2117 break;
2118 case Stmt::LH_None:
2119 ++NumNone;
2120 break;
2121 case Stmt::LH_Likely:
2122 ++NumLikely;
2123 break;
2124 }
2125 }
2126
2127 // Is there a likelihood attribute used?
2128 if (NumUnlikely == 0 && NumLikely == 0)
2129 return std::nullopt;
2130
2131 // When multiple cases share the same code they can be combined during
2132 // optimization. In that case the weights of the branch will be the sum of
2133 // the individual weights. Make sure the combined sum of all neutral cases
2134 // doesn't exceed the value of a single likely attribute.
2135 // The additions both avoid divisions by 0 and make sure the weights of None
2136 // don't exceed the weight of Likely.
2137 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2138 const uint64_t None = Likely / (NumNone + 1);
2139 const uint64_t Unlikely = 0;
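// Worked example (illustrative numbers only): with one [[likely]] case and
// two unannotated cases, Likely = INT32_MAX / 3 = 715827882 and
// None = Likely / 3 = 238609294. Even if the optimizer merges both
// unannotated cases, their combined weight (477218588) stays below the
// weight of the single likely case.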
2140
2141 SmallVector<uint64_t, 16> Result;
2142 Result.reserve(Likelihoods.size());
2143 for (const auto LH : Likelihoods) {
2144 switch (LH) {
2145 case Stmt::LH_Unlikely:
2146 Result.push_back(Unlikely);
2147 break;
2148 case Stmt::LH_None:
2149 Result.push_back(None);
2150 break;
2151 case Stmt::LH_Likely:
2152 Result.push_back(Likely);
2153 break;
2154 }
2155 }
2156
2157 return Result;
2158}
2159
2160 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2161 // Handle nested switch statements.
2162 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2163 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2164 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2165 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2166
2167 // See if we can constant fold the condition of the switch and therefore only
2168 // emit the live case statement (if any) of the switch.
2169 llvm::APSInt ConstantCondValue;
2170 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2171 SmallVector<const Stmt *, 4> CaseStmts;
2172 const SwitchCase *Case = nullptr;
2173 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2174 getContext(), Case)) {
2175 if (Case)
2176 incrementProfileCounter(Case);
2177 RunCleanupsScope ExecutedScope(*this);
2178
2179 if (S.getInit())
2180 EmitStmt(S.getInit());
2181
2182 // Emit the condition variable if needed inside the entire cleanup scope
2183 // used by this special case for constant folded switches.
2184 if (S.getConditionVariable())
2185 EmitDecl(*S.getConditionVariable());
2186
2187 // At this point, we are no longer "within" a switch instance, so
2188 // we can temporarily enforce this to ensure that any embedded case
2189 // statements are not emitted.
2190 SwitchInsn = nullptr;
2191
2192 // Okay, we can dead code eliminate everything except this case. Emit the
2193 // specified series of statements and we're good.
2194 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2195 EmitStmt(CaseStmts[i]);
2196 incrementProfileCounter(&S);
2197
2198 // Now we want to restore the saved switch instance so that nested
2199 // switches continue to function properly.
2200 SwitchInsn = SavedSwitchInsn;
2201
2202 return;
2203 }
2204 }
2205
2206 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2207
2208 RunCleanupsScope ConditionScope(*this);
2209
2210 if (S.getInit())
2211 EmitStmt(S.getInit());
2212
2213 if (S.getConditionVariable())
2214 EmitDecl(*S.getConditionVariable());
2215 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2216
2217 // Create a basic block to hold the code that comes after the switch
2218 // statement. We also need to create the default block now so that
2219 // explicit case range tests have a place to jump to on
2220 // failure.
2221 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2222 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2223 if (PGO.haveRegionCounts()) {
2224 // Walk the SwitchCase list to find how many there are.
2225 uint64_t DefaultCount = 0;
2226 unsigned NumCases = 0;
2227 for (const SwitchCase *Case = S.getSwitchCaseList();
2228 Case;
2229 Case = Case->getNextSwitchCase()) {
2230 if (isa<DefaultStmt>(Case))
2231 DefaultCount = getProfileCount(Case);
2232 NumCases += 1;
2233 }
2234 SwitchWeights = new SmallVector<uint64_t, 16>();
2235 SwitchWeights->reserve(NumCases);
2236 // The default needs to be first. We store the edge count, so we already
2237 // know the right weight.
2238 SwitchWeights->push_back(DefaultCount);
2239 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2240 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2241 // Initialize the default case.
2242 SwitchLikelihood->push_back(Stmt::LH_None);
2243 }
2244
2245 CaseRangeBlock = DefaultBlock;
2246
2247 // Clear the insertion point to indicate we are in unreachable code.
2248 Builder.ClearInsertionPoint();
2249
2250 // All break statements jump to SwitchExit. If BreakContinueStack is
2251 // non-empty, then reuse the last ContinueBlock.
2252 JumpDest OuterContinue;
2253 if (!BreakContinueStack.empty())
2254 OuterContinue = BreakContinueStack.back().ContinueBlock;
2255
2256 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2257
2258 // Emit switch body.
2259 EmitStmt(S.getBody());
2260
2261 BreakContinueStack.pop_back();
2262
2263 // Update the default block in case explicit case range tests have
2264 // been chained on top.
2265 SwitchInsn->setDefaultDest(CaseRangeBlock);
2266
2267 // If a default was never emitted:
2268 if (!DefaultBlock->getParent()) {
2269 // If we have cleanups, emit the default block so that there's a
2270 // place to jump through the cleanups from.
2271 if (ConditionScope.requiresCleanups()) {
2272 EmitBlock(DefaultBlock);
2273
2274 // Otherwise, just forward the default block to the switch end.
2275 } else {
2276 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2277 delete DefaultBlock;
2278 }
2279 }
2280
2281 ConditionScope.ForceCleanup();
2282
2283 // Emit continuation.
2284 EmitBlock(SwitchExit.getBlock(), true);
2285 incrementProfileCounter(&S);
2286
2287 // If the switch has a condition wrapped by __builtin_unpredictable,
2288 // create metadata that specifies that the switch is unpredictable.
2289 // Don't bother if not optimizing because that metadata would not be used.
2290 auto *Call = dyn_cast<CallExpr>(S.getCond());
2291 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2292 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2293 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2294 llvm::MDBuilder MDHelper(getLLVMContext());
2295 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2296 MDHelper.createUnpredictable());
2297 }
2298 }
2299
2300 if (SwitchWeights) {
2301 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2302 "switch weights do not match switch cases");
2303 // If there's only one jump destination there's no sense weighting it.
2304 if (SwitchWeights->size() > 1)
2305 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2306 createProfileWeights(*SwitchWeights));
2307 delete SwitchWeights;
2308 } else if (SwitchLikelihood) {
2309 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2310 "switch likelihoods do not match switch cases");
2311 std::optional<SmallVector<uint64_t, 16>> LHW =
2312 getLikelihoodWeights(*SwitchLikelihood);
2313 if (LHW) {
2314 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2315 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2316 createProfileWeights(*LHW));
2317 }
2318 delete SwitchLikelihood;
2319 }
2320 SwitchInsn = SavedSwitchInsn;
2321 SwitchWeights = SavedSwitchWeights;
2322 SwitchLikelihood = SavedSwitchLikelihood;
2323 CaseRangeBlock = SavedCRBlock;
2324}
2325
2326static std::string
2327SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2328 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2329 std::string Result;
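// Example (hypothetical inputs): a per-operand GCC constraint "g" becomes
// "imr", and a multi-alternative constraint "r,m" becomes "r|m". '=' and
// '+' are dropped here; the caller re-adds the LLVM-style "=" prefix itself.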
2330
2331 while (*Constraint) {
2332 switch (*Constraint) {
2333 default:
2334 Result += Target.convertConstraint(Constraint);
2335 break;
2336 // Ignore these
2337 case '*':
2338 case '?':
2339 case '!':
2340 case '=': // Will see this and the following in multi-alternative constraints.
2341 case '+':
2342 break;
2343 case '#': // Ignore the rest of the constraint alternative.
2344 while (Constraint[1] && Constraint[1] != ',')
2345 Constraint++;
2346 break;
2347 case '&':
2348 case '%':
2349 Result += *Constraint;
2350 while (Constraint[1] && Constraint[1] == *Constraint)
2351 Constraint++;
2352 break;
2353 case ',':
2354 Result += "|";
2355 break;
2356 case 'g':
2357 Result += "imr";
2358 break;
2359 case '[': {
2360 assert(OutCons &&
2361 "Must pass output names to constraints with a symbolic name");
2362 unsigned Index;
2363 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2364 assert(result && "Could not resolve symbolic name"); (void)result;
2365 Result += llvm::utostr(Index);
2366 break;
2367 }
2368 }
2369
2370 Constraint++;
2371 }
2372
2373 return Result;
2374}
2375
2376 /// AddVariableConstraints - Look at AsmExpr, and if it is a variable declared
2377 /// as using a particular register, add that register as a constraint that
2378 /// will be used in this asm stmt.
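/// For example (hypothetical source, illustrative only):
///   register int Tmp asm("eax");
///   __asm__("..." : "=r"(Tmp));
/// rewrites the "r" output constraint to "{eax}" (or "&{eax}" for an
/// early-clobber operand) so that LLVM pins the operand to that register.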
2379static std::string
2380AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2381 const TargetInfo &Target, CodeGenModule &CGM,
2382 const AsmStmt &Stmt, const bool EarlyClobber,
2383 std::string *GCCReg = nullptr) {
2384 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2385 if (!AsmDeclRef)
2386 return Constraint;
2387 const ValueDecl &Value = *AsmDeclRef->getDecl();
2388 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2389 if (!Variable)
2390 return Constraint;
2391 if (Variable->getStorageClass() != SC_Register)
2392 return Constraint;
2393 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2394 if (!Attr)
2395 return Constraint;
2396 StringRef Register = Attr->getLabel();
2397 assert(Target.isValidGCCRegisterName(Register));
2398 // We're using validateOutputConstraint here because we only care if
2399 // this is a register constraint.
2400 TargetInfo::ConstraintInfo Info(Constraint, "");
2401 if (Target.validateOutputConstraint(Info) &&
2402 !Info.allowsRegister()) {
2403 CGM.ErrorUnsupported(&Stmt, "__asm__");
2404 return Constraint;
2405 }
2406 // Canonicalize the register here before returning it.
2407 Register = Target.getNormalizedGCCRegisterName(Register);
2408 if (GCCReg != nullptr)
2409 *GCCReg = Register.str();
2410 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2411}
2412
2413std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2414 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2415 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2416 if (Info.allowsRegister() || !Info.allowsMemory()) {
2417 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2418 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2419
2420 llvm::Type *Ty = ConvertType(InputType);
2421 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2422 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2423 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2424 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2425
2426 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2427 nullptr};
2428 }
2429 }
2430
2431 Address Addr = InputValue.getAddress();
2432 ConstraintStr += '*';
2433 return {InputValue.getPointer(*this), Addr.getElementType()};
2434}
2435
2436std::pair<llvm::Value *, llvm::Type *>
2437CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2438 const Expr *InputExpr,
2439 std::string &ConstraintStr) {
2440 // If this can't be a register or memory, i.e., has to be a constant
2441 // (immediate or symbolic), try to emit it as such.
2442 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2443 if (Info.requiresImmediateConstant()) {
2444 Expr::EvalResult EVResult;
2445 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2446
2447 llvm::APSInt IntResult;
2448 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2449 getContext()))
2450 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2451 }
2452
2453 Expr::EvalResult Result;
2454 if (InputExpr->EvaluateAsInt(Result, getContext()))
2455 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2456 nullptr};
2457 }
2458
2459 if (Info.allowsRegister() || !Info.allowsMemory())
2460 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2461 return {EmitScalarExpr(InputExpr), nullptr};
2462 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2463 return {EmitScalarExpr(InputExpr), nullptr};
2464 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2465 LValue Dest = EmitLValue(InputExpr);
2466 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2467 InputExpr->getExprLoc());
2468}
2469
2470/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2471/// asm call instruction. The !srcloc MDNode contains a list of constant
2472/// integers which are the source locations of the start of each line in the
2473/// asm.
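/// For example (illustrative), a two-line asm string such as
/// "movl %1, %0\n\taddl $1, %0" yields a !srcloc node with two integers:
/// the raw encoding of the string's start and of the byte after the '\n'.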
2474static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2475 CodeGenFunction &CGF) {
2476 SmallVector<llvm::Metadata *, 8> Locs;
2477 // Add the location of the first line to the MDNode.
2478 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2479 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2480 StringRef StrVal = Str->getString();
2481 if (!StrVal.empty()) {
2482 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2483 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2484 unsigned StartToken = 0;
2485 unsigned ByteOffset = 0;
2486
2487 // Add the location of the start of each subsequent line of the asm to the
2488 // MDNode.
2489 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2490 if (StrVal[i] != '\n') continue;
2491 SourceLocation LineLoc = Str->getLocationOfByte(
2492 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2493 Locs.push_back(llvm::ConstantAsMetadata::get(
2494 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2495 }
2496 }
2497
2498 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2499}
2500
2501static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2502 bool HasUnwindClobber, bool ReadOnly,
2503 bool ReadNone, bool NoMerge, bool NoConvergent,
2504 const AsmStmt &S,
2505 const std::vector<llvm::Type *> &ResultRegTypes,
2506 const std::vector<llvm::Type *> &ArgElemTypes,
2507 CodeGenFunction &CGF,
2508 std::vector<llvm::Value *> &RegResults) {
2509 if (!HasUnwindClobber)
2510 Result.addFnAttr(llvm::Attribute::NoUnwind);
2511
2512 if (NoMerge)
2513 Result.addFnAttr(llvm::Attribute::NoMerge);
2514 // Attach readnone and readonly attributes.
2515 if (!HasSideEffect) {
2516 if (ReadNone)
2517 Result.setDoesNotAccessMemory();
2518 else if (ReadOnly)
2519 Result.setOnlyReadsMemory();
2520 }
2521
2522 // Add elementtype attribute for indirect constraints.
2523 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2524 if (Pair.value()) {
2525 auto Attr = llvm::Attribute::get(
2526 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2527 Result.addParamAttr(Pair.index(), Attr);
2528 }
2529 }
2530
2531 // Slap the source location of the inline asm into a !srcloc metadata on the
2532 // call.
2533 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2534 Result.setMetadata("srcloc",
2535 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2536 else {
2537 // At least put the line number on MS inline asm blobs.
2538 llvm::Constant *Loc =
2539 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2540 Result.setMetadata("srcloc",
2541 llvm::MDNode::get(CGF.getLLVMContext(),
2542 llvm::ConstantAsMetadata::get(Loc)));
2543 }
2544
2545 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2546 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2547 // convergent (meaning, they may call an intrinsically convergent op, such
2548 // as bar.sync, and so can't have certain optimizations applied around
2549 // them) unless it's explicitly marked 'noconvergent'.
2550 Result.addFnAttr(llvm::Attribute::Convergent);
2551 // Extract all of the register value results from the asm.
2552 if (ResultRegTypes.size() == 1) {
2553 RegResults.push_back(&Result);
2554 } else {
2555 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2556 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2557 RegResults.push_back(Tmp);
2558 }
2559 }
2560}
2561
2562static void
2563 EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2564 const llvm::ArrayRef<llvm::Value *> RegResults,
2565 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2566 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2567 const llvm::ArrayRef<LValue> ResultRegDests,
2568 const llvm::ArrayRef<QualType> ResultRegQualTys,
2569 const llvm::BitVector &ResultTypeRequiresCast,
2570 const llvm::BitVector &ResultRegIsFlagReg) {
2571 CGBuilderTy &Builder = CGF.Builder;
2572 CodeGenModule &CGM = CGF.CGM;
2573 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2574
2575 assert(RegResults.size() == ResultRegTypes.size());
2576 assert(RegResults.size() == ResultTruncRegTypes.size());
2577 assert(RegResults.size() == ResultRegDests.size());
2578 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2579 // in which case its size may grow.
2580 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2581 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2582
2583 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2584 llvm::Value *Tmp = RegResults[i];
2585 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2586
2587 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2588 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2589 // value.
2590 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2591 llvm::Value *IsBooleanValue =
2592 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2593 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2594 Builder.CreateCall(FnAssume, IsBooleanValue);
2595 }
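// Illustrative IR shape (hypothetical) for an x86 "=@ccz" flag output:
//   %isbool = icmp ult i8 %flag, 2
//   call void @llvm.assume(i1 %isbool)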
2596
2597 // If the result type of the LLVM IR asm doesn't match the result type of
2598 // the expression, do the conversion.
2599 if (ResultRegTypes[i] != TruncTy) {
2600
2601 // Truncate the integer result to the right size; note that TruncTy can be
2602 // a pointer.
2603 if (TruncTy->isFloatingPointTy())
2604 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2605 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2606 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2607 Tmp = Builder.CreateTrunc(
2608 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2609 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2610 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2611 uint64_t TmpSize =
2612 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2613 Tmp = Builder.CreatePtrToInt(
2614 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2615 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2616 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2617 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2618 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2619 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2620 }
2621 }
2622
2623 LValue Dest = ResultRegDests[i];
2624 // ResultTypeRequiresCast elements correspond to the first
2625 // ResultTypeRequiresCast.size() elements of RegResults.
2626 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2627 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2628 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2629 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2630 Builder.CreateStore(Tmp, A);
2631 continue;
2632 }
2633
2634 QualType Ty =
2635 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2636 if (Ty.isNull()) {
2637 const Expr *OutExpr = S.getOutputExpr(i);
2638 CGM.getDiags().Report(OutExpr->getExprLoc(),
2639 diag::err_store_value_to_reg);
2640 return;
2641 }
2642 Dest = CGF.MakeAddrLValue(A, Ty);
2643 }
2644 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2645 }
2646}
2647
2648 static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2649 const AsmStmt &S) {
2650 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2651
2652 StringRef Asm;
2653 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2654 Asm = GCCAsm->getAsmString()->getString();
2655
2656 auto &Ctx = CGF->CGM.getLLVMContext();
2657
2658 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2659 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2660 {StrTy->getType()}, false);
2661 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2662
2663 CGF->Builder.CreateCall(UBF, {StrTy});
2664}
2665
2666 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2667 // Pop all cleanup blocks at the end of the asm statement.
2668 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2669
2670 // Assemble the final asm string.
2671 std::string AsmString = S.generateAsmString(getContext());
2672
2673 // Get all the output and input constraints together.
2674 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2675 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2676
2677 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2678 bool IsValidTargetAsm = true;
2679 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2680 StringRef Name;
2681 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2682 Name = GAS->getOutputName(i);
2683 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2684 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2685 if (IsHipStdPar && !IsValid)
2686 IsValidTargetAsm = false;
2687 else
2688 assert(IsValid && "Failed to parse output constraint");
2689 OutputConstraintInfos.push_back(Info);
2690 }
2691
2692 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2693 StringRef Name;
2694 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2695 Name = GAS->getInputName(i);
2696 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2697 bool IsValid =
2698 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2699 if (IsHipStdPar && !IsValid)
2700 IsValidTargetAsm = false;
2701 else
2702 assert(IsValid && "Failed to parse input constraint");
2703 InputConstraintInfos.push_back(Info);
2704 }
2705
2706 if (!IsValidTargetAsm)
2707 return EmitHipStdParUnsupportedAsm(this, S);
2708
2709 std::string Constraints;
2710
2711 std::vector<LValue> ResultRegDests;
2712 std::vector<QualType> ResultRegQualTys;
2713 std::vector<llvm::Type *> ResultRegTypes;
2714 std::vector<llvm::Type *> ResultTruncRegTypes;
2715 std::vector<llvm::Type *> ArgTypes;
2716 std::vector<llvm::Type *> ArgElemTypes;
2717 std::vector<llvm::Value*> Args;
2718 llvm::BitVector ResultTypeRequiresCast;
2719 llvm::BitVector ResultRegIsFlagReg;
2720
2721 // Keep track of inout constraints.
2722 std::string InOutConstraints;
2723 std::vector<llvm::Value*> InOutArgs;
2724 std::vector<llvm::Type*> InOutArgTypes;
2725 std::vector<llvm::Type*> InOutArgElemTypes;
2726
2727 // Keep track of out constraints for tied input operand.
2728 std::vector<std::string> OutputConstraints;
2729
2730 // Keep track of defined physregs.
2731 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2732
2733 // An inline asm can be marked readonly if it meets the following conditions:
2734 // - it doesn't have any side effects
2735 // - it doesn't clobber memory
2736 // - it doesn't return a value by-reference
2737 // It can be marked readnone if it doesn't have any input memory constraints
2738 // in addition to meeting the conditions listed above.
2739 bool ReadOnly = true, ReadNone = true;
2740
2741 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2742 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2743
2744 // Simplify the output constraint.
2745 std::string OutputConstraint(S.getOutputConstraint(i));
2746 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2747 getTarget(), &OutputConstraintInfos);
2748
2749 const Expr *OutExpr = S.getOutputExpr(i);
2750 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2751
2752 std::string GCCReg;
2753 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2754 getTarget(), CGM, S,
2755 Info.earlyClobber(),
2756 &GCCReg);
2757 // Give an error on multiple outputs to the same physreg.
2758 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2759 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2760
2761 OutputConstraints.push_back(OutputConstraint);
2762 LValue Dest = EmitLValue(OutExpr);
2763 if (!Constraints.empty())
2764 Constraints += ',';
2765
2766 // If this is a register output, then make the inline asm return it
2767 // by-value. If this is a memory result, return the value by-reference.
2768 QualType QTy = OutExpr->getType();
2769 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2770 hasAggregateEvaluationKind(QTy);
2771 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2772
2773 Constraints += "=" + OutputConstraint;
2774 ResultRegQualTys.push_back(QTy);
2775 ResultRegDests.push_back(Dest);
2776
2777 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2778 ResultRegIsFlagReg.push_back(IsFlagReg);
2779
2780 llvm::Type *Ty = ConvertTypeForMem(QTy);
2781 const bool RequiresCast = Info.allowsRegister() &&
2782 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2783 Ty->isAggregateType());
2784
2785 ResultTruncRegTypes.push_back(Ty);
2786 ResultTypeRequiresCast.push_back(RequiresCast);
2787
2788 if (RequiresCast) {
2789 unsigned Size = getContext().getTypeSize(QTy);
2790 if (Size)
2791 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2792 else
2793 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2794 }
2795 ResultRegTypes.push_back(Ty);
2796 // If this output is tied to an input, and if the input is larger, then
2797 // we need to set the actual result type of the inline asm node to be the
2798 // same as the input type.
2799 if (Info.hasMatchingInput()) {
2800 unsigned InputNo;
2801 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2802 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2803 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2804 break;
2805 }
2806 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2807
2808 QualType InputTy = S.getInputExpr(InputNo)->getType();
2809 QualType OutputType = OutExpr->getType();
2810
2811 uint64_t InputSize = getContext().getTypeSize(InputTy);
2812 if (getContext().getTypeSize(OutputType) < InputSize) {
2813 // Form the asm to return the value as a larger integer or fp type.
2814 ResultRegTypes.back() = ConvertType(InputTy);
2815 }
2816 }
2817 if (llvm::Type* AdjTy =
2818 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2819 ResultRegTypes.back()))
2820 ResultRegTypes.back() = AdjTy;
2821 else {
2822 CGM.getDiags().Report(S.getAsmLoc(),
2823 diag::err_asm_invalid_type_in_input)
2824 << OutExpr->getType() << OutputConstraint;
2825 }
2826
2827 // Update largest vector width for any vector types.
2828 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2829 LargestVectorWidth =
2830 std::max((uint64_t)LargestVectorWidth,
2831 VT->getPrimitiveSizeInBits().getKnownMinValue());
2832 } else {
2833 Address DestAddr = Dest.getAddress();
2834 // Matrix types in memory are represented by arrays, but accessed through
2835 // vector pointers, with the alignment specified on the access operation.
2836 // For inline assembly, update pointer arguments to use vector pointers.
2837 // Otherwise there will be a mismatch if the matrix is also an
2838 // input argument, which is represented as a vector.
2839 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2840 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2841
2842 ArgTypes.push_back(DestAddr.getType());
2843 ArgElemTypes.push_back(DestAddr.getElementType());
2844 Args.push_back(DestAddr.emitRawPointer(*this));
2845 Constraints += "=*";
2846 Constraints += OutputConstraint;
2847 ReadOnly = ReadNone = false;
2848 }
2849
2850 if (Info.isReadWrite()) {
2851 InOutConstraints += ',';
2852
2853 const Expr *InputExpr = S.getOutputExpr(i);
2854 llvm::Value *Arg;
2855 llvm::Type *ArgElemType;
2856 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2857 Info, Dest, InputExpr->getType(), InOutConstraints,
2858 InputExpr->getExprLoc());
2859
2860 if (llvm::Type* AdjTy =
2861 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2862 Arg->getType()))
2863 Arg = Builder.CreateBitCast(Arg, AdjTy);
2864
2865 // Update largest vector width for any vector types.
2866 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2867 LargestVectorWidth =
2868 std::max((uint64_t)LargestVectorWidth,
2869 VT->getPrimitiveSizeInBits().getKnownMinValue());
2870 // Only tie earlyclobber physregs.
2871 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2872 InOutConstraints += llvm::utostr(i);
2873 else
2874 InOutConstraints += OutputConstraint;
2875
2876 InOutArgTypes.push_back(Arg->getType());
2877 InOutArgElemTypes.push_back(ArgElemType);
2878 InOutArgs.push_back(Arg);
2879 }
2880 }
2881
2882 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2883 // to the return value slot. Only do this when returning in registers.
2884 if (isa<MSAsmStmt>(&S)) {
2885 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2886 if (RetAI.isDirect() || RetAI.isExtend()) {
2887 // Make a fake lvalue for the return value slot.
2888 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2889 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2890 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2891 ResultRegDests, AsmString, S.getNumOutputs());
2892 SawAsmBlock = true;
2893 }
2894 }
2895
2896 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2897 const Expr *InputExpr = S.getInputExpr(i);
2898
2899 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2900
2901 if (Info.allowsMemory())
2902 ReadNone = false;
2903
2904 if (!Constraints.empty())
2905 Constraints += ',';
2906
2907 // Simplify the input constraint.
2908 std::string InputConstraint(S.getInputConstraint(i));
2909 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2910 &OutputConstraintInfos);
2911
2912 InputConstraint = AddVariableConstraints(
2913 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2914 getTarget(), CGM, S, false /* No EarlyClobber */);
2915
2916 std::string ReplaceConstraint (InputConstraint);
2917 llvm::Value *Arg;
2918 llvm::Type *ArgElemType;
2919 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2920
2921 // If this input argument is tied to a larger output result, extend the
2922 // input to be the same size as the output. The LLVM backend wants to see
2923 // the input and output of a matching constraint be the same size. Note
2924 // that GCC does not define what the top bits are here. We use zext because
2925 // that is usually cheaper, but LLVM IR should really get an anyext someday.
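// For example (hypothetical): in asm("..." : "=r"(L) : "0"(I)) with
// 'long L' and 'int I' on a 64-bit target, the i32 input is zero-extended
// to i64 so both tied operands present the same type to the backend.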
2926 if (Info.hasTiedOperand()) {
2927 unsigned Output = Info.getTiedOperand();
2928 QualType OutputType = S.getOutputExpr(Output)->getType();
2929 QualType InputTy = InputExpr->getType();
2930
2931 if (getContext().getTypeSize(OutputType) >
2932 getContext().getTypeSize(InputTy)) {
2933 // Use ptrtoint as appropriate so that we can do our extension.
2934 if (isa<llvm::PointerType>(Arg->getType()))
2935 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2936 llvm::Type *OutputTy = ConvertType(OutputType);
2937 if (isa<llvm::IntegerType>(OutputTy))
2938 Arg = Builder.CreateZExt(Arg, OutputTy);
2939 else if (isa<llvm::PointerType>(OutputTy))
2940 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2941 else if (OutputTy->isFloatingPointTy())
2942 Arg = Builder.CreateFPExt(Arg, OutputTy);
2943 }
2944 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2945 ReplaceConstraint = OutputConstraints[Output];
2946 }
2947 if (llvm::Type* AdjTy =
2948 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2949 Arg->getType()))
2950 Arg = Builder.CreateBitCast(Arg, AdjTy);
2951 else
2952 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2953 << InputExpr->getType() << InputConstraint;
2954
2955 // Update largest vector width for any vector types.
2956 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2957 LargestVectorWidth =
2958 std::max((uint64_t)LargestVectorWidth,
2959 VT->getPrimitiveSizeInBits().getKnownMinValue());
2960
2961 ArgTypes.push_back(Arg->getType());
2962 ArgElemTypes.push_back(ArgElemType);
2963 Args.push_back(Arg);
2964 Constraints += InputConstraint;
2965 }
2966
2967 // Append the "input" part of inout constraints.
2968 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2969 ArgTypes.push_back(InOutArgTypes[i]);
2970 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2971 Args.push_back(InOutArgs[i]);
2972 }
2973 Constraints += InOutConstraints;
2974
2975 // Labels
2976 SmallVector<llvm::BasicBlock *, 16> Transfer;
2977 llvm::BasicBlock *Fallthrough = nullptr;
2978 bool IsGCCAsmGoto = false;
2979 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2980 IsGCCAsmGoto = GS->isAsmGoto();
2981 if (IsGCCAsmGoto) {
2982 for (const auto *E : GS->labels()) {
2983 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2984 Transfer.push_back(Dest.getBlock());
2985 if (!Constraints.empty())
2986 Constraints += ',';
2987 Constraints += "!i";
2988 }
2989 Fallthrough = createBasicBlock("asm.fallthrough");
2990 }
2991 }
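// Illustrative IR shape (hypothetical) for an asm goto with one label,
// emitted via CreateCallBr below:
//   callbr void asm sideeffect "...", "...,!i"()
//           to label %asm.fallthrough [label %indirect.dest]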
2992
2993 bool HasUnwindClobber = false;
2994
2995 // Clobbers
2996 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2997 StringRef Clobber = S.getClobber(i);
2998
2999 if (Clobber == "memory")
3000 ReadOnly = ReadNone = false;
3001 else if (Clobber == "unwind") {
3002 HasUnwindClobber = true;
3003 continue;
3004 } else if (Clobber != "cc") {
3005 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3006 if (CGM.getCodeGenOpts().StackClashProtector &&
3007 getTarget().isSPRegName(Clobber)) {
3008 CGM.getDiags().Report(S.getAsmLoc(),
3009 diag::warn_stack_clash_protection_inline_asm);
3010 }
3011 }
3012
3013 if (isa<MSAsmStmt>(&S)) {
3014 if (Clobber == "eax" || Clobber == "edx") {
3015 if (Constraints.find("=&A") != std::string::npos)
3016 continue;
3017 std::string::size_type position1 =
3018 Constraints.find("={" + Clobber.str() + "}");
3019 if (position1 != std::string::npos) {
3020 Constraints.insert(position1 + 1, "&");
3021 continue;
3022 }
3023 std::string::size_type position2 = Constraints.find("=A");
3024 if (position2 != std::string::npos) {
3025 Constraints.insert(position2 + 1, "&");
3026 continue;
3027 }
3028 }
3029 }
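// E.g. (hypothetical): if an MS asm blob returns in EAX and "eax" is also
// listed as a clobber, rewriting "={eax}" to "=&{eax}" marks the output as
// early-clobber instead of emitting a conflicting "~{eax}" clobber.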
3030 if (!Constraints.empty())
3031 Constraints += ',';
3032
3033 Constraints += "~{";
3034 Constraints += Clobber;
3035 Constraints += '}';
3036 }
3037
3038 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3039 "unwind clobber can't be used with asm goto");
3040
3041 // Add machine specific clobbers
3042 std::string_view MachineClobbers = getTarget().getClobbers();
3043 if (!MachineClobbers.empty()) {
3044 if (!Constraints.empty())
3045 Constraints += ',';
3046 Constraints += MachineClobbers;
3047 }
3048
3049 llvm::Type *ResultType;
3050 if (ResultRegTypes.empty())
3051 ResultType = VoidTy;
3052 else if (ResultRegTypes.size() == 1)
3053 ResultType = ResultRegTypes[0];
3054 else
3055 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3056
3057 llvm::FunctionType *FTy =
3058 llvm::FunctionType::get(ResultType, ArgTypes, false);
3059
3060 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3061
3062 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3063 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3064 ? llvm::InlineAsm::AD_ATT
3065 : llvm::InlineAsm::AD_Intel;
3066 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3067 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3068
3069 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3070 FTy, AsmString, Constraints, HasSideEffect,
3071 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3072 std::vector<llvm::Value*> RegResults;
3073 llvm::CallBrInst *CBR;
3074 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3075 CBRRegResults;
3076 if (IsGCCAsmGoto) {
3077 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3078 EmitBlock(Fallthrough);
3079 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3080 ReadNone, InNoMergeAttributedStmt,
3081 InNoConvergentAttributedStmt, S, ResultRegTypes,
3082 ArgElemTypes, *this, RegResults);
3083 // Because we are emitting code top to bottom, we don't have enough
3084 // information at this point to know precisely whether we have a critical
3085 // edge. If we have outputs, split all indirect destinations.
3086 if (!RegResults.empty()) {
3087 unsigned i = 0;
3088 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3089 llvm::Twine SynthName = Dest->getName() + ".split";
3090 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3091 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3092 Builder.SetInsertPoint(SynthBB);
3093
3094 if (ResultRegTypes.size() == 1) {
3095 CBRRegResults[SynthBB].push_back(CBR);
3096 } else {
3097 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3098 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3099 CBRRegResults[SynthBB].push_back(Tmp);
3100 }
3101 }
3102
3103 EmitBranch(Dest);
3104 EmitBlock(SynthBB);
3105 CBR->setIndirectDest(i++, SynthBB);
3106 }
3107 }
3108 } else if (HasUnwindClobber) {
3109 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3110 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3111 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3112 InNoConvergentAttributedStmt, S, ResultRegTypes,
3113 ArgElemTypes, *this, RegResults);
3114 } else {
3115 llvm::CallInst *Result =
3116 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3117 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3118 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3119 InNoConvergentAttributedStmt, S, ResultRegTypes,
3120 ArgElemTypes, *this, RegResults);
3121 }
3122
3123 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3124 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3125 ResultRegIsFlagReg);
3126
3127 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3128 // different insertion point; one for each indirect destination and with
3129 // CBRRegResults rather than RegResults.
3130 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3131 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3132 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3133 Builder.SetInsertPoint(Succ, --(Succ->end()));
3134 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3135 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3136 ResultTypeRequiresCast, ResultRegIsFlagReg);
3137 }
3138 }
3139}
3140
3141 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3142 const RecordDecl *RD = S.getCapturedRecordDecl();
3143 QualType RecordTy = getContext().getRecordType(RD);
3144
3145 // Initialize the captured struct.
3146 LValue SlotLV =
3147 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3148
3149 RecordDecl::field_iterator CurField = RD->field_begin();
3150 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3151 E = S.capture_init_end();
3152 I != E; ++I, ++CurField) {
3153 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3154 if (CurField->hasCapturedVLAType()) {
3155 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3156 } else {
3157 EmitInitializerForField(*CurField, LV, *I);
3158 }
3159 }
3160
3161 return SlotLV;
3162}
3163
3164/// Generate an outlined function for the body of a CapturedStmt, store any
3165/// captured variables into the captured struct, and call the outlined function.
3166llvm::Function *
3167 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3168 LValue CapStruct = InitCapturedStruct(S);
3169
3170 // Emit the CapturedDecl
3171 CodeGenFunction CGF(CGM, true);
3172 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3173 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3174 delete CGF.CapturedStmtInfo;
3175
3176 // Emit call to the helper function.
3177 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3178
3179 return F;
3180}
3181
3182 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3183 LValue CapStruct = InitCapturedStruct(S);
3184 return CapStruct.getAddress();
3185}
3186
3187/// Creates the outlined function for a CapturedStmt.
3188llvm::Function *
3189 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3190 assert(CapturedStmtInfo &&
3191 "CapturedStmtInfo should be set when generating the captured function");
3192 const CapturedDecl *CD = S.getCapturedDecl();
3193 const RecordDecl *RD = S.getCapturedRecordDecl();
3194 SourceLocation Loc = S.getBeginLoc();
3195 assert(CD->hasBody() && "missing CapturedDecl body");
3196
3197 // Build the argument list.
3198 ASTContext &Ctx = CGM.getContext();
3199 FunctionArgList Args;
3200 Args.append(CD->param_begin(), CD->param_end());
3201
3202 // Create the function declaration.
3203 const CGFunctionInfo &FuncInfo =
3204 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3205 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3206
3207 llvm::Function *F =
3208 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3209 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3210 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3211 if (CD->isNothrow())
3212 F->addFnAttr(llvm::Attribute::NoUnwind);
3213
3214 // Generate the function.
3215 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3216 CD->getBody()->getBeginLoc());
3217 // Set the context parameter in CapturedStmtInfo.
3218 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3219 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3220
3221 // Initialize variable-length arrays.
3222 LValue Base = MakeNaturalAlignRawAddrLValue(CapturedStmtInfo->getContextValue(),
3223 Ctx.getTagDeclType(RD));
3224 for (auto *FD : RD->fields()) {
3225 if (FD->hasCapturedVLAType()) {
3226 auto *ExprArg =
3227 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3228 .getScalarVal();
3229 auto VAT = FD->getCapturedVLAType();
3230 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3231 }
3232 }
3233
3234 // If 'this' is captured, load it into CXXThisValue.
3235 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3236 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3237 LValue ThisLValue = EmitLValueForField(Base, FD);
3238 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3239 }
3240
3241 PGO.assignRegionCounters(GlobalDecl(CD), F);
3242 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3243 FinishFunction(CD->getBodyRBrace());
3244
3245 return F;
3246}
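//
// VLA handling above, sketched on a hypothetical input:
//
//   void f(int n) {
//     int vla[n];
//     #pragma omp parallel   // captures 'vla' and its bound
//     { use(vla); }
//   }
//
// The capture record carries the evaluated bound; the helper reloads it and
// seeds VLASizeMap for the size expression, so indexing and sizeof on the
// VLA keep working inside the outlined body.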
3247
3248namespace {
3249// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3250// Returns nullptr otherwise.
3251llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
3252 for (auto &I : *BB) {
3253 auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
3254 if (II && llvm::isConvergenceControlIntrinsic(II->getIntrinsicID()))
3255 return II;
3256 }
3257 return nullptr;
3258}
3259
3260} // namespace
3261
3262llvm::CallBase *
3263CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
3264 llvm::Value *ParentToken) {
3265 llvm::Value *bundleArgs[] = {ParentToken};
3266 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3267 auto Output = llvm::CallBase::addOperandBundle(
3268 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3269 Input->replaceAllUsesWith(Output);
3270 Input->eraseFromParent();
3271 return Output;
3272}
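//
// In IR terms (sketch): an existing call such as
//
//   %r = call i32 @g()
//
// is replaced by a clone that carries the convergence bundle,
//
//   %r = call i32 @g() [ "convergencectrl"(token %parent) ]
//
// because operand bundles cannot be added to an instruction in place.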
3273
3274llvm::IntrinsicInst *
3275CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
3276 llvm::Value *ParentToken) {
3277 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3278 if (BB->empty())
3279 Builder.SetInsertPoint(BB);
3280 else
3281 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3282
3283 llvm::CallBase *CB = Builder.CreateIntrinsic(
3284 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3285 Builder.restoreIP(IP);
3286
3287 llvm::CallBase *I = addConvergenceControlToken(CB, ParentToken);
3288 return cast<llvm::IntrinsicInst>(I);
3289}
3290
3291llvm::IntrinsicInst *
3292CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3293 llvm::BasicBlock *BB = &F->getEntryBlock();
3294 llvm::IntrinsicInst *Token = getConvergenceToken(BB);
3295 if (Token)
3296 return Token;
3297
3298 // Adding a convergence token requires the function to be marked as
3299 // convergent.
3300 F->setConvergent();
3301
3302 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3303 Builder.SetInsertPoint(&BB->front());
3304 llvm::CallBase *I = Builder.CreateIntrinsic(
3305 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3306 assert(isa<llvm::IntrinsicInst>(I));
3307 Builder.restoreIP(IP);
3308
3309 return cast<llvm::IntrinsicInst>(I);
3310}
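//
// Putting the tokens together, the emitted IR has roughly this shape:
//
//   define void @f() convergent {
//   entry:
//     %t.entry = call token @llvm.experimental.convergence.entry()
//     br label %loop
//   loop:
//     %t.loop = call token @llvm.experimental.convergence.loop()
//                  [ "convergencectrl"(token %t.entry) ]
//     ; convergent calls in the loop take "convergencectrl"(token %t.loop)
//     ...
//   }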