//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/Attr.h"
19#include "clang/AST/Expr.h"
20#include "clang/AST/Stmt.h"
27#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/IR/Assumptions.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/InlineAsm.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/MDBuilder.h"
36#include "llvm/Support/SaveAndRestore.h"
37#include <optional>
38
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }
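  // For illustration, a minimal sketch of the pruning above: given a call to
  // a noreturn function, e.g.
  //
  //   [[noreturn]] void fail();
  //   void f() { fail(); }
  //
  // emitting the call terminates the block and opens a fresh block with no
  // predecessors, which the use_empty() test erases instead of leaving dead
  // code after the call.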

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
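
// A sketch of the GNU statement-expression case handled above: in
//
//   int v = ({ do_work(); done: 42; });   // do_work() is hypothetical
//
// the result statement is the LabelStmt "done: 42;", so the loop peels off
// (and emits) the label before evaluating the trailing expression whose
// value the statement expression yields.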

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
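
// Sketches of statement forms that reach the handler above (assuming the
// corresponding language features are enabled):
//
//   [[clang::musttail]] return g(n);  // records g(n) as the musttail call
//   [[clang::noinline]] f();          // sets InNoInlineAttributedStmt
//   [[assume(x > 0)]];                // emits llvm.assume on the condition
//
// The SaveAndRestore guards scope the flags to the sub-statement emission
// only; the previous values come back when EmitAttributedStmt returns.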

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
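
// A sketch of the GNU computed-goto form lowered above:
//
//   static void *targets[] = { &&L0, &&L1 };
//   goto *targets[i];
//
// Every indirect goto funnels into the single block returned by
// GetIndirectGotoBlock(), whose leading PHI collects the target address
// from each predecessor.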

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = Else;
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (Else)
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  // executed,
  // - assuming the profile is poor, preserving the attribute may still be
  // beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}
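
// A sketch of the constant-folding path in EmitIfStmt: for
//
//   if (sizeof(int) == 4) f(); else g();
//
// only the taken arm is emitted, provided the skipped arm contains no label
// that could be reached from elsewhere. When the condition does not fold,
// the branch gets PGO weights when available, or llvm.expect metadata
// derived from [[likely]]/[[unlikely]] in optimized builds.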

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose constant expression
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of the
  // following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}
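
// Sketches of how checkIfLoopMustProgress applies (assuming the default
// -ffinite-loops setting):
//
//   while (x > 0) { ... }  // C11 and later: non-constant condition, so the
//                          // loop must make progress.
//   while (1) { }          // C++11 and later: trivial infinite loop; the
//                          // function's mustprogress attribute is dropped
//                          // so the loop is preserved.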

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching one
// of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.pop_back();
}
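
// A sketch of the while(1) special case above: in
//
//   while (1) { body(); if (p) break; }   // body(), p hypothetical
//
// the condition folds to the i1 constant true, so EmitBoolCondBranch is
// false: no conditional branch is emitted, the header falls through into the
// body, and the now-trivial header block is cleaned up by
// SimplifyForwardingBlocks. The break still reaches while.end through the
// BreakContinue entry pushed above.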

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.pop_back();
}
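
// A sketch of the "do {} while (0)" special case above, common in macros:
//
//   #define GUARD(stmt) do { stmt; } while (0)
//
// The condition folds to the i1 constant false, so no backedge is emitted;
// control falls from do.body through do.cond into do.end, and the empty
// do.cond forwarding block is erased by SimplifyForwardingBlocks.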

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.pop_back();
}
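
// A sketch of the block layout EmitForStmt produces for a full
// "for (init; cond; inc) body;":
//
//   for.cond:          evaluate cond, conditional branch
//   for.body:          body, in its own cleanup scope
//   for.inc:           inc; the 'continue' target when an increment exists
//   for.cond.cleanup:  staged exit, only when the for scope needs cleanups
//   for.end:           fall-through after the loop; the 'break' target
//
// Without an increment, 'continue' branches directly back to for.cond.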

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (getLangOpts().assumeFunctionsAreConvergent())
    ConvergenceTokenStack.pop_back();
}
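
// A sketch of what EmitCXXForRangeStmt consumes: Sema has already desugared
//
//   for (auto x : v) use(x);   // use() is hypothetical
//
// into range/begin/end statements, roughly
//
//   auto &&__range = v;
//   auto __begin = begin-expr, __end = end-expr;
//   for (; __begin != __end; ++__begin) { auto x = *__begin; use(x); }
//
// so this function only lays out the blocks and emits the loop variable
// inside the body's lexical scope.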

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}
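
// A sketch of the pattern this predicate enables (see the musttail handling
// in EmitReturnStmt below): in
//
//   __attribute__((swiftasynccall)) void callee(void *ctx);
//   __attribute__((swiftasynccall)) void caller(void *ctx) {
//     return callee(ctx);   // becomes a musttail call
//   }
//
// returning a swiftasynccall callee's result from a swiftasynccall function
// hands off the async context without growing the stack.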

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      else
        Builder.CreateStore(Ret, ReturnValue);
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
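
// A sketch of the NRVO path above: in
//
//   Widget make() { Widget w; w.fill(); return w; }   // Widget hypothetical
//
// 'w' is constructed directly in the return slot, so the return statement
// emits no copy. When the variable's destruction is conditional, its NRVO
// flag is set to true here so the cleanup path skips destroying the
// returned object.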

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
1643
1644/// EmitCaseStmtRange - If case statement range is not too big then
1645/// add multiple cases to switch instruction, one for each value within
1646/// the range. If range is too big then emit "if" condition check.
1648 ArrayRef<const Attr *> Attrs) {
1649 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1650
1651 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1652 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1653
1654 // Emit the code for this case. We do this first to make sure it is
1655 // properly chained from our predecessor before generating the
1656 // switch machinery to enter this block.
1657 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1658 EmitBlockWithFallThrough(CaseDest, &S);
1659 EmitStmt(S.getSubStmt());
1660
1661 // If range is empty, do nothing.
1662 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1663 return;
1664
1666 llvm::APInt Range = RHS - LHS;
1667 // FIXME: parameters such as this should not be hardcoded.
1668 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1669 // Range is small enough to add multiple switch instruction cases.
1670 uint64_t Total = getProfileCount(&S);
1671 unsigned NCases = Range.getZExtValue() + 1;
1672 // We only have one region counter for the entire set of cases here, so we
1673 // need to divide the weights evenly between the generated cases, ensuring
1674 // that the total weight is preserved. E.g., a weight of 5 over three cases
1675 // will be distributed as weights of 2, 2, and 1.
1676 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1677 for (unsigned I = 0; I != NCases; ++I) {
1678 if (SwitchWeights)
1679 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1680 else if (SwitchLikelihood)
1681 SwitchLikelihood->push_back(LH);
1682
1683 if (Rem)
1684 Rem--;
1685 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1686 ++LHS;
1687 }
1688 return;
1689 }
1690
1691 // The range is too big. Emit "if" condition into a new block,
1692 // making sure to save and restore the current insertion point.
1693 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1694
1695 // Push this test onto the chain of range checks (which terminates
1696 // in the default basic block). The switch's default will be changed
1697 // to the top of this chain after switch emission is complete.
1698 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1699 CaseRangeBlock = createBasicBlock("sw.caserange");
1700
1701 CurFn->insert(CurFn->end(), CaseRangeBlock);
1702 Builder.SetInsertPoint(CaseRangeBlock);
1703
1704 // Emit range check.
1705 llvm::Value *Diff =
1706 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1707 llvm::Value *Cond =
1708 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1709
1710 llvm::MDNode *Weights = nullptr;
1711 if (SwitchWeights) {
1712 uint64_t ThisCount = getProfileCount(&S);
1713 uint64_t DefaultCount = (*SwitchWeights)[0];
1714 Weights = createProfileWeights(ThisCount, DefaultCount);
1715
1716 // Since we're chaining the switch default through each large case range, we
1717 // need to update the weight for the default, i.e., the first case, to include
1718 // this case.
1719 (*SwitchWeights)[0] += ThisCount;
1720 } else if (SwitchLikelihood)
1721 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1722
1723 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1724
1725 // Restore the appropriate insertion point.
1726 if (RestoreBB)
1727 Builder.SetInsertPoint(RestoreBB);
1728 else
1729 Builder.ClearInsertionPoint();
1730}
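// Rough illustration of the two strategies above (not part of the source):
// a small range such as "case 1 ... 3:" simply becomes three switch cases
// (1, 2, 3) on the same destination block, while a large range such as
// "case 0 ... 999:" is lowered to the unsigned subtract-and-compare idiom,
// approximately:
//   %diff = sub i32 %cond, %lhs
//   %inbounds = icmp ule i32 %diff, %range   ; %range == RHS - LHS
//   br i1 %inbounds, label %sw.bb, label %sw.caserange
// with the fresh sw.caserange block becoming the switch's default so that
// successive large ranges chain until the true default is reached.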
1731
1732void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1733 ArrayRef<const Attr *> Attrs) {
1734 // If there is no enclosing switch instance that we're aware of, then this
1735 // case statement and its block can be elided. This situation only happens
1736 // when we've constant-folded the switch, are emitting the constant case,
1737 // and part of the constant case includes another case statement. For
1738 // instance: switch (4) { case 4: do { case 5: } while (1); }
1739 if (!SwitchInsn) {
1740 EmitStmt(S.getSubStmt());
1741 return;
1742 }
1743
1744 // Handle case ranges.
1745 if (S.getRHS()) {
1746 EmitCaseStmtRange(S, Attrs);
1747 return;
1748 }
1749
1750 llvm::ConstantInt *CaseVal =
1751 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1752
1753 // Emit debuginfo for the case value if it is an enum value.
1754 const ConstantExpr *CE;
1755 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1756 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1757 else
1758 CE = dyn_cast<ConstantExpr>(S.getLHS());
1759 if (CE) {
1760 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1761 if (CGDebugInfo *Dbg = getDebugInfo())
1762 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1763 Dbg->EmitGlobalVariable(DE->getDecl(),
1764 APValue(llvm::APSInt(CaseVal->getValue())));
1765 }
1766
1767 if (SwitchLikelihood)
1768 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1769
1770 // If the body of the case is just a 'break', try to not emit an empty block.
1771 // If we're profiling or we're not optimizing, leave the block in for better
1772 // debug and coverage analysis.
1773 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1774 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1775 isa<BreakStmt>(S.getSubStmt())) {
1776 JumpDest Block = BreakContinueStack.back().BreakBlock;
1777
1778 // Only do this optimization if there are no cleanups that need emitting.
1779 if (isObviouslyBranchWithoutCleanups(Block)) {
1780 if (SwitchWeights)
1781 SwitchWeights->push_back(getProfileCount(&S));
1782 SwitchInsn->addCase(CaseVal, Block.getBlock());
1783
1784 // If there was a fallthrough into this case, make sure to redirect it to
1785 // the end of the switch as well.
1786 if (Builder.GetInsertBlock()) {
1787 Builder.CreateBr(Block.getBlock());
1788 Builder.ClearInsertionPoint();
1789 }
1790 return;
1791 }
1792 }
1793
1794 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1795 EmitBlockWithFallThrough(CaseDest, &S);
1796 if (SwitchWeights)
1797 SwitchWeights->push_back(getProfileCount(&S));
1798 SwitchInsn->addCase(CaseVal, CaseDest);
1799
1800 // Recursively emitting the statement is acceptable, but is not wonderful for
1801 // code where we have many case statements nested together, i.e.:
1802 // case 1:
1803 // case 2:
1804 // case 3: etc.
1805 // Handling this recursively will create a new block for each case statement
1806 // that falls through to the next case which is IR intensive. It also causes
1807 // deep recursion which can run into stack depth limitations. Handle
1808 // sequential non-range case statements specially.
1809 //
1810 // TODO When the next case has a likelihood attribute the code returns to the
1811 // recursive algorithm. Maybe improve this case if it becomes common practice
1812 // to use a lot of attributes.
1813 const CaseStmt *CurCase = &S;
1814 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1815
1816 // Otherwise, iteratively add consecutive cases to this switch stmt.
1817 while (NextCase && NextCase->getRHS() == nullptr) {
1818 CurCase = NextCase;
1819 llvm::ConstantInt *CaseVal =
1820 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1821
1822 if (SwitchWeights)
1823 SwitchWeights->push_back(getProfileCount(NextCase));
1824 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1825 CaseDest = createBasicBlock("sw.bb");
1826 EmitBlockWithFallThrough(CaseDest, CurCase);
1827 }
1828 // Since this loop is only executed when the CaseStmt has no attributes,
1829 // use a hard-coded value.
1830 if (SwitchLikelihood)
1831 SwitchLikelihood->push_back(Stmt::LH_None);
1832
1833 SwitchInsn->addCase(CaseVal, CaseDest);
1834 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1835 }
1836
1837 // Generate a stop point for debug info if the case statement is
1838 // followed by a default statement. A fallthrough case before a
1839 // default case gets its own branch target.
1840 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1841 EmitStopPoint(CurCase);
1842
1843 // Normal default recursion for non-cases.
1844 EmitStmt(CurCase->getSubStmt());
1845}
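// Rough illustration (not part of the source): given
//   case 1:
//   case 2:
//   case 3: body(); break;
// the value 1 is added above, the while-loop then adds 2 and 3 iteratively
// (reusing the same sw.bb block unless Clang profiling instrumentation asks
// for a distinct block per case), and only the innermost substatement,
// "body(); break;", goes through the final recursive EmitStmt.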
1846
1847void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1848 ArrayRef<const Attr *> Attrs) {
1849 // If there is no enclosing switch instance that we're aware of, then this
1850 // default statement can be elided. This situation only happens when we've
1851 // constant-folded the switch.
1852 if (!SwitchInsn) {
1853 EmitStmt(S.getSubStmt());
1854 return;
1855 }
1856
1857 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1858 assert(DefaultBlock->empty() &&
1859 "EmitDefaultStmt: Default block already defined?");
1860
1861 if (SwitchLikelihood)
1862 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1863
1864 EmitBlockWithFallThrough(DefaultBlock, &S);
1865
1866 EmitStmt(S.getSubStmt());
1867}
1868
1869/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1870/// constant value that is being switched on, see if we can dead code eliminate
1871/// the body of the switch to a simple series of statements to emit. Basically,
1872/// on a switch (5) we want to find these statements:
1873/// case 5:
1874/// printf(...); <--
1875/// ++i; <--
1876/// break;
1877///
1878/// and add them to the ResultStmts vector. If it is unsafe to do this
1879/// transformation (for example, one of the elided statements contains a label
1880/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1881/// should include statements after it (e.g. the printf() line is a substmt of
1882/// the case) then return CSFC_FallThrough. If we handled it and found a break
1883/// statement, then return CSFC_Success.
1884///
1885/// If Case is non-null, then we are looking for the specified case, checking
1886/// that nothing we jump over contains labels. If Case is null, then we found
1887/// the case and are looking for the break.
1888///
1889/// If the recursive walk actually finds our Case, then we set FoundCase to
1890/// true.
1891///
1892enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1893static CSFC_Result CollectStatementsForCase(const Stmt *S,
1894 const SwitchCase *Case,
1895 bool &FoundCase,
1896 SmallVectorImpl<const Stmt*> &ResultStmts) {
1897 // If this is a null statement, just succeed.
1898 if (!S)
1899 return Case ? CSFC_Success : CSFC_FallThrough;
1900
1901 // If this is the switchcase (case 4: or default) that we're looking for, then
1902 // we're in business. Just add the substatement.
1903 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1904 if (S == Case) {
1905 FoundCase = true;
1906 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1907 ResultStmts);
1908 }
1909
1910 // Otherwise, this is some other case or default statement, just ignore it.
1911 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1912 ResultStmts);
1913 }
1914
1915 // If we are in the live part of the code and we found our break statement,
1916 // return a success!
1917 if (!Case && isa<BreakStmt>(S))
1918 return CSFC_Success;
1919
1920 // If this is a compound statement, then it might contain the SwitchCase, the
1921 // break, or neither.
1922 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1923 // Handle this as two cases: we might be looking for the SwitchCase (if so
1924 // the skipped statements must be skippable) or we might already have it.
1925 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1926 bool StartedInLiveCode = FoundCase;
1927 unsigned StartSize = ResultStmts.size();
1928
1929 // If we've not found the case yet, scan through looking for it.
1930 if (Case) {
1931 // Keep track of whether we see a skipped declaration. The code could be
1932 // using the declaration even if it is skipped, so we can't optimize out
1933 // the decl if the kept statements might refer to it.
1934 bool HadSkippedDecl = false;
1935
1936 // If we're looking for the case, just see if we can skip each of the
1937 // substatements.
1938 for (; Case && I != E; ++I) {
1939 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1940
1941 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1942 case CSFC_Failure: return CSFC_Failure;
1943 case CSFC_Success:
1944 // A successful result means that either 1) the statement doesn't
1945 // have the case and is skippable, or 2) it does contain the case value
1946 // and also contains the break to exit the switch. In the latter case,
1947 // we just verify the rest of the statements are elidable.
1948 if (FoundCase) {
1949 // If we found the case and skipped declarations, we can't do the
1950 // optimization.
1951 if (HadSkippedDecl)
1952 return CSFC_Failure;
1953
1954 for (++I; I != E; ++I)
1955 if (CodeGenFunction::ContainsLabel(*I, true))
1956 return CSFC_Failure;
1957 return CSFC_Success;
1958 }
1959 break;
1960 case CSFC_FallThrough:
1961 // If we have a fallthrough condition, then we must have found the
1962 // case and started to include statements. Consider the rest of the
1963 // statements in the compound statement as candidates for inclusion.
1964 assert(FoundCase && "Didn't find case but returned fallthrough?");
1965 // We recursively found Case, so we're not looking for it anymore.
1966 Case = nullptr;
1967
1968 // If we found the case and skipped declarations, we can't do the
1969 // optimization.
1970 if (HadSkippedDecl)
1971 return CSFC_Failure;
1972 break;
1973 }
1974 }
1975
1976 if (!FoundCase)
1977 return CSFC_Success;
1978
1979 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1980 }
1981
1982 // If we have statements in our range, then we know that the statements are
1983 // live and need to be added to the set of statements we're tracking.
1984 bool AnyDecls = false;
1985 for (; I != E; ++I) {
1986 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1987
1988 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1989 case CSFC_Failure: return CSFC_Failure;
1990 case CSFC_FallThrough:
1991 // A fallthrough result means that the statement was simple and just
1992 // included in ResultStmt, keep adding them afterwards.
1993 break;
1994 case CSFC_Success:
1995 // A successful result means that we found the break statement and
1996 // stopped statement inclusion. We just ensure that any leftover stmts
1997 // are skippable and return success ourselves.
1998 for (++I; I != E; ++I)
1999 if (CodeGenFunction::ContainsLabel(*I, true))
2000 return CSFC_Failure;
2001 return CSFC_Success;
2002 }
2003 }
2004
2005 // If we're about to fall out of a scope without hitting a 'break;', we
2006 // can't perform the optimization if there were any decls in that scope
2007 // (we'd lose their end-of-lifetime).
2008 if (AnyDecls) {
2009 // If the entire compound statement was live, there's one more thing we
2010 // can try before giving up: emit the whole thing as a single statement.
2011 // We can do that unless the statement contains a 'break;'.
2012 // FIXME: Such a break must be at the end of a construct within this one.
2013 // We could emit this by just ignoring the BreakStmts entirely.
2014 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2015 ResultStmts.resize(StartSize);
2016 ResultStmts.push_back(S);
2017 } else {
2018 return CSFC_Failure;
2019 }
2020 }
2021
2022 return CSFC_FallThrough;
2023 }
2024
2025 // Okay, this is some other statement that we don't handle explicitly, like a
2026 // for statement or increment etc. If we are skipping over this statement,
2027 // just verify it doesn't have labels, which would make it invalid to elide.
2028 if (Case) {
2029 if (CodeGenFunction::ContainsLabel(S, true))
2030 return CSFC_Failure;
2031 return CSFC_Success;
2032 }
2033
2034 // Otherwise, we want to include this statement. Everything is cool with that
2035 // so long as it doesn't contain a break out of the switch we're in.
2036 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2037
2038 // Otherwise, everything is great. Include the statement and tell the caller
2039 // that we fall through and include the next statement as well.
2040 ResultStmts.push_back(S);
2041 return CSFC_FallThrough;
2042}
2043
2044/// FindCaseStatementsForValue - Find the case statement being jumped to and
2045/// then invoke CollectStatementsForCase to find the list of statements to emit
2046/// for a switch on constant. See the comment above CollectStatementsForCase
2047/// for more details.
2048static bool FindCaseStatementsForValue(const SwitchStmt &S,
2049 const llvm::APSInt &ConstantCondValue,
2050 SmallVectorImpl<const Stmt*> &ResultStmts,
2051 ASTContext &C,
2052 const SwitchCase *&ResultCase) {
2053 // First step, find the switch case that is being branched to. We can do this
2054 // efficiently by scanning the SwitchCase list.
2055 const SwitchCase *Case = S.getSwitchCaseList();
2056 const DefaultStmt *DefaultCase = nullptr;
2057
2058 for (; Case; Case = Case->getNextSwitchCase()) {
2059 // It's either a default or case. Just remember the default statement in
2060 // case we're not jumping to any numbered cases.
2061 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2062 DefaultCase = DS;
2063 continue;
2064 }
2065
2066 // Check to see if this case is the one we're looking for.
2067 const CaseStmt *CS = cast<CaseStmt>(Case);
2068 // Don't handle case ranges yet.
2069 if (CS->getRHS()) return false;
2070
2071 // If we found our case, remember it as 'case'.
2072 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2073 break;
2074 }
2075
2076 // If we didn't find a matching case, we use a default if it exists, or we
2077 // elide the whole switch body!
2078 if (!Case) {
2079 // It is safe to elide the body of the switch if it doesn't contain labels
2080 // etc. If it is safe, return successfully with an empty ResultStmts list.
2081 if (!DefaultCase)
2082 return !CodeGenFunction::ContainsLabel(&S);
2083 Case = DefaultCase;
2084 }
2085
2086 // Ok, we know which case is being jumped to, try to collect all the
2087 // statements that follow it. This can fail for a variety of reasons. Also,
2088 // check to see that the recursive walk actually found our case statement.
2089 // Insane cases like this can fail to find it in the recursive walk since we
2090 // don't handle every stmt kind:
2091 // switch (4) {
2092 // while (1) {
2093 // case 4: ...
2094 bool FoundCase = false;
2095 ResultCase = Case;
2096 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2097 ResultStmts) != CSFC_Failure &&
2098 FoundCase;
2099}
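// Rough illustration (not part of the source): for
//   switch (4) { case 4: puts("hit"); break; default: abort(); }
// the scan above selects "case 4", CollectStatementsForCase gathers the
// puts() call and stops at the break, and EmitSwitchStmt then emits just
// that call with no switch instruction at all.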
2100
2101static std::optional<SmallVector<uint64_t, 16>>
2102getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2103 // Are there enough branches to weight them?
2104 if (Likelihoods.size() <= 1)
2105 return std::nullopt;
2106
2107 uint64_t NumUnlikely = 0;
2108 uint64_t NumNone = 0;
2109 uint64_t NumLikely = 0;
2110 for (const auto LH : Likelihoods) {
2111 switch (LH) {
2112 case Stmt::LH_Unlikely:
2113 ++NumUnlikely;
2114 break;
2115 case Stmt::LH_None:
2116 ++NumNone;
2117 break;
2118 case Stmt::LH_Likely:
2119 ++NumLikely;
2120 break;
2121 }
2122 }
2123
2124 // Is there a likelihood attribute used?
2125 if (NumUnlikely == 0 && NumLikely == 0)
2126 return std::nullopt;
2127
2128 // When multiple cases share the same code they can be combined during
2129 // optimization. In that case the weights of the branch will be the sum of
2130 // the individual weights. Make sure the combined sum of all neutral cases
2131 // doesn't exceed the value of a single likely attribute.
2132 // The additions both avoid divisions by 0 and make sure the weights of None
2133 // don't exceed the weight of Likely.
2134 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2135 const uint64_t None = Likely / (NumNone + 1);
2136 const uint64_t Unlikely = 0;
2137
2138 SmallVector<uint64_t, 16> Result;
2139 Result.reserve(Likelihoods.size());
2140 for (const auto LH : Likelihoods) {
2141 switch (LH) {
2142 case Stmt::LH_Unlikely:
2143 Result.push_back(Unlikely);
2144 break;
2145 case Stmt::LH_None:
2146 Result.push_back(None);
2147 break;
2148 case Stmt::LH_Likely:
2149 Result.push_back(Likely);
2150 break;
2151 }
2152 }
2153
2154 return Result;
2155}
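// A quick check of those weights, as a hypothetical example: one [[likely]]
// case plus a default and two unannotated cases gives NumLikely = 1 and
// NumNone = 3, so Likely = INT32_MAX / 3 and None = Likely / 4. Even if the
// optimizer merges all three neutral branches, their combined weight of
// 3 * None still stays below the single Likely weight, as required above.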
2156
2157void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2158 // Handle nested switch statements.
2159 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2160 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2161 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2162 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2163
2164 // See if we can constant fold the condition of the switch and therefore only
2165 // emit the live case statement (if any) of the switch.
2166 llvm::APSInt ConstantCondValue;
2167 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2168 SmallVector<const Stmt*, 4> CaseStmts;
2169 const SwitchCase *Case = nullptr;
2170 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2171 getContext(), Case)) {
2172 if (Case)
2173 incrementProfileCounter(Case);
2174 RunCleanupsScope ExecutedScope(*this);
2175
2176 if (S.getInit())
2177 EmitStmt(S.getInit());
2178
2179 // Emit the condition variable if needed inside the entire cleanup scope
2180 // used by this special case for constant folded switches.
2181 if (S.getConditionVariable())
2182 EmitDecl(*S.getConditionVariable());
2183
2184 // At this point, we are no longer "within" a switch instance, so
2185 // we can temporarily enforce this to ensure that any embedded case
2186 // statements are not emitted.
2187 SwitchInsn = nullptr;
2188
2189 // Okay, we can dead code eliminate everything except this case. Emit the
2190 // specified series of statements and we're good.
2191 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2192 EmitStmt(CaseStmts[i]);
2193 incrementProfileCounter(&S);
2194
2195 // Now we want to restore the saved switch instance so that nested
2196 // switches continue to function properly.
2197 SwitchInsn = SavedSwitchInsn;
2198
2199 return;
2200 }
2201 }
2202
2203 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2204
2205 RunCleanupsScope ConditionScope(*this);
2206
2207 if (S.getInit())
2208 EmitStmt(S.getInit());
2209
2210 if (S.getConditionVariable())
2211 EmitDecl(*S.getConditionVariable());
2212 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2213
2214 // Create basic block to hold stuff that comes after switch
2215 // statement. We also need to create a default block now so that
2216 // explicit case range tests can have a place to jump to on
2217 // failure.
2218 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2219 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2220 if (PGO.haveRegionCounts()) {
2221 // Walk the SwitchCase list to find how many there are.
2222 uint64_t DefaultCount = 0;
2223 unsigned NumCases = 0;
2224 for (const SwitchCase *Case = S.getSwitchCaseList();
2225 Case;
2226 Case = Case->getNextSwitchCase()) {
2227 if (isa<DefaultStmt>(Case))
2228 DefaultCount = getProfileCount(Case);
2229 NumCases += 1;
2230 }
2231 SwitchWeights = new SmallVector<uint64_t, 16>();
2232 SwitchWeights->reserve(NumCases);
2233 // The default needs to be first. We store the edge count, so we already
2234 // know the right weight.
2235 SwitchWeights->push_back(DefaultCount);
2236 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2237 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2238 // Initialize the default case.
2239 SwitchLikelihood->push_back(Stmt::LH_None);
2240 }
2241
2242 CaseRangeBlock = DefaultBlock;
2243
2244 // Clear the insertion point to indicate we are in unreachable code.
2245 Builder.ClearInsertionPoint();
2246
2247 // All break statements jump to SwitchExit. If BreakContinueStack is non-empty
2248 // then reuse last ContinueBlock.
2249 JumpDest OuterContinue;
2250 if (!BreakContinueStack.empty())
2251 OuterContinue = BreakContinueStack.back().ContinueBlock;
2252
2253 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2254
2255 // Emit switch body.
2256 EmitStmt(S.getBody());
2257
2258 BreakContinueStack.pop_back();
2259
2260 // Update the default block in case explicit case range tests have
2261 // been chained on top.
2262 SwitchInsn->setDefaultDest(CaseRangeBlock);
2263
2264 // If a default was never emitted:
2265 if (!DefaultBlock->getParent()) {
2266 // If we have cleanups, emit the default block so that there's a
2267 // place to jump through the cleanups from.
2268 if (ConditionScope.requiresCleanups()) {
2269 EmitBlock(DefaultBlock);
2270
2271 // Otherwise, just forward the default block to the switch end.
2272 } else {
2273 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2274 delete DefaultBlock;
2275 }
2276 }
2277
2278 ConditionScope.ForceCleanup();
2279
2280 // Emit continuation.
2281 EmitBlock(SwitchExit.getBlock(), true);
2283
2284 // If the switch has a condition wrapped by __builtin_unpredictable,
2285 // create metadata that specifies that the switch is unpredictable.
2286 // Don't bother if not optimizing because that metadata would not be used.
2287 auto *Call = dyn_cast<CallExpr>(S.getCond());
2288 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2289 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2290 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2291 llvm::MDBuilder MDHelper(getLLVMContext());
2292 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2293 MDHelper.createUnpredictable());
2294 }
2295 }
2296
2297 if (SwitchWeights) {
2298 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2299 "switch weights do not match switch cases");
2300 // If there's only one jump destination there's no sense weighting it.
2301 if (SwitchWeights->size() > 1)
2302 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2303 createProfileWeights(*SwitchWeights));
2304 delete SwitchWeights;
2305 } else if (SwitchLikelihood) {
2306 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2307 "switch likelihoods do not match switch cases");
2308 std::optional<SmallVector<uint64_t, 16>> LHW =
2309 getLikelihoodWeights(*SwitchLikelihood);
2310 if (LHW) {
2311 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2312 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2313 createProfileWeights(*LHW));
2314 }
2315 delete SwitchLikelihood;
2316 }
2317 SwitchInsn = SavedSwitchInsn;
2318 SwitchWeights = SavedSwitchWeights;
2319 SwitchLikelihood = SavedSwitchLikelihood;
2320 CaseRangeBlock = SavedCRBlock;
2321}
2322
2323static std::string
2324SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2325 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2326 std::string Result;
2327
2328 while (*Constraint) {
2329 switch (*Constraint) {
2330 default:
2331 Result += Target.convertConstraint(Constraint);
2332 break;
2333 // Ignore these
2334 case '*':
2335 case '?':
2336 case '!':
2337 case '=': // Will see this and the following in multi-alternative constraints.
2338 case '+':
2339 break;
2340 case '#': // Ignore the rest of the constraint alternative.
2341 while (Constraint[1] && Constraint[1] != ',')
2342 Constraint++;
2343 break;
2344 case '&':
2345 case '%':
2346 Result += *Constraint;
2347 while (Constraint[1] && Constraint[1] == *Constraint)
2348 Constraint++;
2349 break;
2350 case ',':
2351 Result += "|";
2352 break;
2353 case 'g':
2354 Result += "imr";
2355 break;
2356 case '[': {
2357 assert(OutCons &&
2358 "Must pass output names to constraints with a symbolic name");
2359 unsigned Index;
2360 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2361 assert(result && "Could not resolve symbolic name"); (void)result;
2362 Result += llvm::utostr(Index);
2363 break;
2364 }
2365 }
2366
2367 Constraint++;
2368 }
2369
2370 return Result;
2371}
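// Rough illustration (not part of the source): "g" is rewritten to "imr",
// a multi-alternative constraint such as "r,m" becomes "r|m", the modifier
// characters '=', '+', '*', '?' and '!' are dropped, and a symbolic operand
// name such as "[len]" is resolved to the numeric index of the named output.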
2372
2373/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2374/// as using a particular register add that as a constraint that will be used
2375/// in this asm stmt.
2376static std::string
2377AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2378 const TargetInfo &Target, CodeGenModule &CGM,
2379 const AsmStmt &Stmt, const bool EarlyClobber,
2380 std::string *GCCReg = nullptr) {
2381 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2382 if (!AsmDeclRef)
2383 return Constraint;
2384 const ValueDecl &Value = *AsmDeclRef->getDecl();
2385 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2386 if (!Variable)
2387 return Constraint;
2388 if (Variable->getStorageClass() != SC_Register)
2389 return Constraint;
2390 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2391 if (!Attr)
2392 return Constraint;
2393 StringRef Register = Attr->getLabel();
2394 assert(Target.isValidGCCRegisterName(Register));
2395 // We're using validateOutputConstraint here because we only care if
2396 // this is a register constraint.
2397 TargetInfo::ConstraintInfo Info(Constraint, "");
2398 if (Target.validateOutputConstraint(Info) &&
2399 !Info.allowsRegister()) {
2400 CGM.ErrorUnsupported(&Stmt, "__asm__");
2401 return Constraint;
2402 }
2403 // Canonicalize the register here before returning it.
2404 Register = Target.getNormalizedGCCRegisterName(Register);
2405 if (GCCReg != nullptr)
2406 *GCCReg = Register.str();
2407 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2408}
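// Rough illustration (not part of the source): given
//   register int r asm("eax");
//   asm("..." : "=r"(r));
// the output's "r" constraint is rewritten to "{eax}" ("&{eax}" if the
// operand is earlyclobber) and *GCCReg is set to "eax", which lets the
// caller diagnose two outputs pinned to the same hard register.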
2409
2410std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2411 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2412 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2413 if (Info.allowsRegister() || !Info.allowsMemory()) {
2414 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2415 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2416
2417 llvm::Type *Ty = ConvertType(InputType);
2418 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2419 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2420 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2421 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2422
2423 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2424 nullptr};
2425 }
2426 }
2427
2428 Address Addr = InputValue.getAddress();
2429 ConstraintStr += '*';
2430 return {InputValue.getPointer(*this), Addr.getElementType()};
2431}
2432
2433std::pair<llvm::Value *, llvm::Type *>
2434CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2435 const Expr *InputExpr,
2436 std::string &ConstraintStr) {
2437 // If this can't be a register or memory, i.e., has to be a constant
2438 // (immediate or symbolic), try to emit it as such.
2439 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2440 if (Info.requiresImmediateConstant()) {
2441 Expr::EvalResult EVResult;
2442 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2443
2444 llvm::APSInt IntResult;
2445 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2446 getContext()))
2447 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2448 }
2449
2450 Expr::EvalResult Result;
2451 if (InputExpr->EvaluateAsInt(Result, getContext()))
2452 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2453 nullptr};
2454 }
2455
2456 if (Info.allowsRegister() || !Info.allowsMemory())
2457 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2458 return {EmitScalarExpr(InputExpr), nullptr};
2459 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2460 return {EmitScalarExpr(InputExpr), nullptr};
2461 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2462 LValue Dest = EmitLValue(InputExpr);
2463 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2464 InputExpr->getExprLoc());
2465}
2466
2467/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2468/// asm call instruction. The !srcloc MDNode contains a list of constant
2469/// integers which are the source locations of the start of each line in the
2470/// asm.
2471static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2472 CodeGenFunction &CGF) {
2473 SmallVector<llvm::Metadata *, 8> Locs;
2474 // Add the location of the first line to the MDNode.
2475 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2476 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2477 StringRef StrVal = Str->getString();
2478 if (!StrVal.empty()) {
2479 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2480 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2481 unsigned StartToken = 0;
2482 unsigned ByteOffset = 0;
2483
2484 // Add the location of the start of each subsequent line of the asm to the
2485 // MDNode.
2486 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2487 if (StrVal[i] != '\n') continue;
2488 SourceLocation LineLoc = Str->getLocationOfByte(
2489 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2490 Locs.push_back(llvm::ConstantAsMetadata::get(
2491 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2492 }
2493 }
2494
2495 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2496}
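// Rough illustration (not part of the source): for asm("nop\n\tnop"), the
// returned !srcloc node carries two i64 values, the raw encoding of the
// string literal's start and of the byte just past the '\n', so backend
// diagnostics can point at the individual asm line that failed.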
2497
2498static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2499 bool HasUnwindClobber, bool ReadOnly,
2500 bool ReadNone, bool NoMerge, bool NoConvergent,
2501 const AsmStmt &S,
2502 const std::vector<llvm::Type *> &ResultRegTypes,
2503 const std::vector<llvm::Type *> &ArgElemTypes,
2504 CodeGenFunction &CGF,
2505 std::vector<llvm::Value *> &RegResults) {
2506 if (!HasUnwindClobber)
2507 Result.addFnAttr(llvm::Attribute::NoUnwind);
2508
2509 if (NoMerge)
2510 Result.addFnAttr(llvm::Attribute::NoMerge);
2511 // Attach readnone and readonly attributes.
2512 if (!HasSideEffect) {
2513 if (ReadNone)
2514 Result.setDoesNotAccessMemory();
2515 else if (ReadOnly)
2516 Result.setOnlyReadsMemory();
2517 }
2518
2519 // Add elementtype attribute for indirect constraints.
2520 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2521 if (Pair.value()) {
2522 auto Attr = llvm::Attribute::get(
2523 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2524 Result.addParamAttr(Pair.index(), Attr);
2525 }
2526 }
2527
2528 // Slap the source location of the inline asm into a !srcloc metadata on the
2529 // call.
2530 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2531 Result.setMetadata("srcloc",
2532 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2533 else {
2534 // At least put the line number on MS inline asm blobs.
2535 llvm::Constant *Loc =
2536 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2537 Result.setMetadata("srcloc",
2538 llvm::MDNode::get(CGF.getLLVMContext(),
2539 llvm::ConstantAsMetadata::get(Loc)));
2540 }
2541
2542 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2543 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2544 // convergent (meaning, they may call an intrinsically convergent op, such
2545 // as bar.sync, and so can't have certain optimizations applied around
2546 // them) unless it's explicitly marked 'noconvergent'.
2547 Result.addFnAttr(llvm::Attribute::Convergent);
2548 // Extract all of the register value results from the asm.
2549 if (ResultRegTypes.size() == 1) {
2550 RegResults.push_back(&Result);
2551 } else {
2552 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2553 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2554 RegResults.push_back(Tmp);
2555 }
2556 }
2557}
2558
2559static void
2560EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2561 const llvm::ArrayRef<llvm::Value *> RegResults,
2562 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2563 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2564 const llvm::ArrayRef<LValue> ResultRegDests,
2565 const llvm::ArrayRef<QualType> ResultRegQualTys,
2566 const llvm::BitVector &ResultTypeRequiresCast,
2567 const llvm::BitVector &ResultRegIsFlagReg) {
2568 CGBuilderTy &Builder = CGF.Builder;
2569 CodeGenModule &CGM = CGF.CGM;
2570 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2571
2572 assert(RegResults.size() == ResultRegTypes.size());
2573 assert(RegResults.size() == ResultTruncRegTypes.size());
2574 assert(RegResults.size() == ResultRegDests.size());
2575 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2576 // in which case its size may grow.
2577 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2578 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2579
2580 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2581 llvm::Value *Tmp = RegResults[i];
2582 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2583
2584 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2585 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2586 // value.
2587 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2588 llvm::Value *IsBooleanValue =
2589 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2590 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2591 Builder.CreateCall(FnAssume, IsBooleanValue);
2592 }
2593
2594 // If the result type of the LLVM IR asm doesn't match the result type of
2595 // the expression, do the conversion.
2596 if (ResultRegTypes[i] != TruncTy) {
2597
2598 // Truncate the integer result to the right size, note that TruncTy can be
2599 // a pointer.
2600 if (TruncTy->isFloatingPointTy())
2601 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2602 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2603 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2604 Tmp = Builder.CreateTrunc(
2605 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2606 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2607 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2608 uint64_t TmpSize =
2609 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2610 Tmp = Builder.CreatePtrToInt(
2611 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2612 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2613 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2614 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2615 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2616 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2617 }
2618 }
2619
2620 LValue Dest = ResultRegDests[i];
2621 // ResultTypeRequiresCast elements correspond to the first
2622 // ResultTypeRequiresCast.size() elements of RegResults.
2623 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2624 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2625 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2626 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2627 Builder.CreateStore(Tmp, A);
2628 continue;
2629 }
2630
2631 QualType Ty =
2632 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2633 if (Ty.isNull()) {
2634 const Expr *OutExpr = S.getOutputExpr(i);
2635 CGM.getDiags().Report(OutExpr->getExprLoc(),
2636 diag::err_store_value_to_reg);
2637 return;
2638 }
2639 Dest = CGF.MakeAddrLValue(A, Ty);
2640 }
2641 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2642 }
2643}
2644
2645static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2646 const AsmStmt &S) {
2647 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2648
2649 StringRef Asm;
2650 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2651 Asm = GCCAsm->getAsmString()->getString();
2652
2653 auto &Ctx = CGF->CGM.getLLVMContext();
2654
2655 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2656 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2657 {StrTy->getType()}, false);
2658 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2659
2660 CGF->Builder.CreateCall(UBF, {StrTy});
2661}
2662
2663void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2664 // Pop all cleanup blocks at the end of the asm statement.
2665 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2666
2667 // Assemble the final asm string.
2668 std::string AsmString = S.generateAsmString(getContext());
2669
2670 // Get all the output and input constraints together.
2671 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2672 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2673
2674 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2675 bool IsValidTargetAsm = true;
2676 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2677 StringRef Name;
2678 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2679 Name = GAS->getOutputName(i);
2680 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2681 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2682 if (IsHipStdPar && !IsValid)
2683 IsValidTargetAsm = false;
2684 else
2685 assert(IsValid && "Failed to parse output constraint");
2686 OutputConstraintInfos.push_back(Info);
2687 }
2688
2689 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2690 StringRef Name;
2691 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2692 Name = GAS->getInputName(i);
2693 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2694 bool IsValid =
2695 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2696 if (IsHipStdPar && !IsValid)
2697 IsValidTargetAsm = false;
2698 else
2699 assert(IsValid && "Failed to parse input constraint");
2700 InputConstraintInfos.push_back(Info);
2701 }
2702
2703 if (!IsValidTargetAsm)
2704 return EmitHipStdParUnsupportedAsm(this, S);
2705
2706 std::string Constraints;
2707
2708 std::vector<LValue> ResultRegDests;
2709 std::vector<QualType> ResultRegQualTys;
2710 std::vector<llvm::Type *> ResultRegTypes;
2711 std::vector<llvm::Type *> ResultTruncRegTypes;
2712 std::vector<llvm::Type *> ArgTypes;
2713 std::vector<llvm::Type *> ArgElemTypes;
2714 std::vector<llvm::Value*> Args;
2715 llvm::BitVector ResultTypeRequiresCast;
2716 llvm::BitVector ResultRegIsFlagReg;
2717
2718 // Keep track of inout constraints.
2719 std::string InOutConstraints;
2720 std::vector<llvm::Value*> InOutArgs;
2721 std::vector<llvm::Type*> InOutArgTypes;
2722 std::vector<llvm::Type*> InOutArgElemTypes;
2723
2724 // Keep track of out constraints for tied input operand.
2725 std::vector<std::string> OutputConstraints;
2726
2727 // Keep track of defined physregs.
2728 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2729
2730 // An inline asm can be marked readonly if it meets the following conditions:
2731 // - it doesn't have any side effects
2732 // - it doesn't clobber memory
2733 // - it doesn't return a value by-reference
2734 // It can be marked readnone if it doesn't have any input memory constraints
2735 // in addition to meeting the conditions listed above.
2736 bool ReadOnly = true, ReadNone = true;
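// Hypothetical example of those rules: a non-volatile
//   asm("rdtsc" : "=a"(lo), "=d"(hi))
// has outputs, no "memory" clobber and no by-reference result, so it can be
// marked readnone; adding an "m" input would demote it to readonly, and
// marking it volatile (or giving it zero outputs) would allow neither.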
2737
2738 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2739 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2740
2741 // Simplify the output constraint.
2742 std::string OutputConstraint(S.getOutputConstraint(i));
2743 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2744 getTarget(), &OutputConstraintInfos);
2745
2746 const Expr *OutExpr = S.getOutputExpr(i);
2747 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2748
2749 std::string GCCReg;
2750 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2751 getTarget(), CGM, S,
2752 Info.earlyClobber(),
2753 &GCCReg);
2754 // Give an error on multiple outputs to same physreg.
2755 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2756 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2757
2758 OutputConstraints.push_back(OutputConstraint);
2759 LValue Dest = EmitLValue(OutExpr);
2760 if (!Constraints.empty())
2761 Constraints += ',';
2762
2763 // If this is a register output, then make the inline asm return it
2764 // by-value. If this is a memory result, return the value by-reference.
2765 QualType QTy = OutExpr->getType();
2766 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2767 hasAggregateEvaluationKind(QTy);
2768 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2769
2770 Constraints += "=" + OutputConstraint;
2771 ResultRegQualTys.push_back(QTy);
2772 ResultRegDests.push_back(Dest);
2773
2774 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2775 ResultRegIsFlagReg.push_back(IsFlagReg);
2776
2777 llvm::Type *Ty = ConvertTypeForMem(QTy);
2778 const bool RequiresCast = Info.allowsRegister() &&
2779 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2780 Ty->isAggregateType());
2781
2782 ResultTruncRegTypes.push_back(Ty);
2783 ResultTypeRequiresCast.push_back(RequiresCast);
2784
2785 if (RequiresCast) {
2786 unsigned Size = getContext().getTypeSize(QTy);
2787 if (Size)
2788 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2789 else
2790 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2791 }
2792 ResultRegTypes.push_back(Ty);
2793 // If this output is tied to an input, and if the input is larger, then
2794 // we need to set the actual result type of the inline asm node to be the
2795 // same as the input type.
2796 if (Info.hasMatchingInput()) {
2797 unsigned InputNo;
2798 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2799 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2800 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2801 break;
2802 }
2803 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2804
2805 QualType InputTy = S.getInputExpr(InputNo)->getType();
2806 QualType OutputType = OutExpr->getType();
2807
2808 uint64_t InputSize = getContext().getTypeSize(InputTy);
2809 if (getContext().getTypeSize(OutputType) < InputSize) {
2810 // Form the asm to return the value as a larger integer or fp type.
2811 ResultRegTypes.back() = ConvertType(InputTy);
2812 }
2813 }
2814 if (llvm::Type* AdjTy =
2815 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2816 ResultRegTypes.back()))
2817 ResultRegTypes.back() = AdjTy;
2818 else {
2819 CGM.getDiags().Report(S.getAsmLoc(),
2820 diag::err_asm_invalid_type_in_input)
2821 << OutExpr->getType() << OutputConstraint;
2822 }
2823
2824 // Update largest vector width for any vector types.
2825 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2826 LargestVectorWidth =
2827 std::max((uint64_t)LargestVectorWidth,
2828 VT->getPrimitiveSizeInBits().getKnownMinValue());
2829 } else {
2830 Address DestAddr = Dest.getAddress();
2831 // Matrix types in memory are represented by arrays, but accessed through
2832 // vector pointers, with the alignment specified on the access operation.
2833 // For inline assembly, update pointer arguments to use vector pointers.
2834 // Otherwise there will be a mis-match if the matrix is also an
2835 // input-argument which is represented as vector.
2836 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2837 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2838
2839 ArgTypes.push_back(DestAddr.getType());
2840 ArgElemTypes.push_back(DestAddr.getElementType());
2841 Args.push_back(DestAddr.emitRawPointer(*this));
2842 Constraints += "=*";
2843 Constraints += OutputConstraint;
2844 ReadOnly = ReadNone = false;
2845 }
2846
2847 if (Info.isReadWrite()) {
2848 InOutConstraints += ',';
2849
2850 const Expr *InputExpr = S.getOutputExpr(i);
2851 llvm::Value *Arg;
2852 llvm::Type *ArgElemType;
2853 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2854 Info, Dest, InputExpr->getType(), InOutConstraints,
2855 InputExpr->getExprLoc());
2856
2857 if (llvm::Type* AdjTy =
2858 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2859 Arg->getType()))
2860 Arg = Builder.CreateBitCast(Arg, AdjTy);
2861
2862 // Update largest vector width for any vector types.
2863 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2864 LargestVectorWidth =
2865 std::max((uint64_t)LargestVectorWidth,
2866 VT->getPrimitiveSizeInBits().getKnownMinValue());
2867 // Only tie earlyclobber physregs.
2868 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2869 InOutConstraints += llvm::utostr(i);
2870 else
2871 InOutConstraints += OutputConstraint;
2872
2873 InOutArgTypes.push_back(Arg->getType());
2874 InOutArgElemTypes.push_back(ArgElemType);
2875 InOutArgs.push_back(Arg);
2876 }
2877 }
2878
2879 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2880 // to the return value slot. Only do this when returning in registers.
2881 if (isa<MSAsmStmt>(&S)) {
2882 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2883 if (RetAI.isDirect() || RetAI.isExtend()) {
2884 // Make a fake lvalue for the return value slot.
2885 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2886 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2887 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2888 ResultRegDests, AsmString, S.getNumOutputs());
2889 SawAsmBlock = true;
2890 }
2891 }
2892
2893 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2894 const Expr *InputExpr = S.getInputExpr(i);
2895
2896 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2897
2898 if (Info.allowsMemory())
2899 ReadNone = false;
2900
2901 if (!Constraints.empty())
2902 Constraints += ',';
2903
2904 // Simplify the input constraint.
2905 std::string InputConstraint(S.getInputConstraint(i));
2906 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2907 &OutputConstraintInfos);
2908
2909 InputConstraint = AddVariableConstraints(
2910 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2911 getTarget(), CGM, S, false /* No EarlyClobber */);
2912
2913 std::string ReplaceConstraint (InputConstraint);
2914 llvm::Value *Arg;
2915 llvm::Type *ArgElemType;
2916 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2917
2918 // If this input argument is tied to a larger output result, extend the
2919 // input to be the same size as the output. The LLVM backend wants to see
2920 // the input and output of a matching constraint be the same size. Note
2921 // that GCC does not define what the top bits are here. We use zext because
2922 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2923 if (Info.hasTiedOperand()) {
2924 unsigned Output = Info.getTiedOperand();
2925 QualType OutputType = S.getOutputExpr(Output)->getType();
2926 QualType InputTy = InputExpr->getType();
2927
2928 if (getContext().getTypeSize(OutputType) >
2929 getContext().getTypeSize(InputTy)) {
2930 // Use ptrtoint as appropriate so that we can do our extension.
2931 if (isa<llvm::PointerType>(Arg->getType()))
2932 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2933 llvm::Type *OutputTy = ConvertType(OutputType);
2934 if (isa<llvm::IntegerType>(OutputTy))
2935 Arg = Builder.CreateZExt(Arg, OutputTy);
2936 else if (isa<llvm::PointerType>(OutputTy))
2937 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2938 else if (OutputTy->isFloatingPointTy())
2939 Arg = Builder.CreateFPExt(Arg, OutputTy);
2940 }
2941 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2942 ReplaceConstraint = OutputConstraints[Output];
2943 }
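// Hypothetical example: with asm("..." : "=r"(ll) : "0"(i)) where ll is
// 64-bit and i is 32-bit, the 32-bit input was just zero-extended to i64 so
// both sides of the tied "0" constraint have the same LLVM type.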
2944 if (llvm::Type* AdjTy =
2945 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2946 Arg->getType()))
2947 Arg = Builder.CreateBitCast(Arg, AdjTy);
2948 else
2949 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2950 << InputExpr->getType() << InputConstraint;
2951
2952 // Update largest vector width for any vector types.
2953 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2954 LargestVectorWidth =
2955 std::max((uint64_t)LargestVectorWidth,
2956 VT->getPrimitiveSizeInBits().getKnownMinValue());
2957
2958 ArgTypes.push_back(Arg->getType());
2959 ArgElemTypes.push_back(ArgElemType);
2960 Args.push_back(Arg);
2961 Constraints += InputConstraint;
2962 }
2963
2964 // Append the "input" part of inout constraints.
2965 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2966 ArgTypes.push_back(InOutArgTypes[i]);
2967 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2968 Args.push_back(InOutArgs[i]);
2969 }
2970 Constraints += InOutConstraints;
2971
2972 // Labels
2973 SmallVector<llvm::BasicBlock *, 16> Transfer;
2974 llvm::BasicBlock *Fallthrough = nullptr;
2975 bool IsGCCAsmGoto = false;
2976 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2977 IsGCCAsmGoto = GS->isAsmGoto();
2978 if (IsGCCAsmGoto) {
2979 for (const auto *E : GS->labels()) {
2980 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2981 Transfer.push_back(Dest.getBlock());
2982 if (!Constraints.empty())
2983 Constraints += ',';
2984 Constraints += "!i";
2985 }
2986 Fallthrough = createBasicBlock("asm.fallthrough");
2987 }
2988 }
2989
2990 bool HasUnwindClobber = false;
2991
2992 // Clobbers
2993 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2994 StringRef Clobber = S.getClobber(i);
2995
2996 if (Clobber == "memory")
2997 ReadOnly = ReadNone = false;
2998 else if (Clobber == "unwind") {
2999 HasUnwindClobber = true;
3000 continue;
3001 } else if (Clobber != "cc") {
3002 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3003 if (CGM.getCodeGenOpts().StackClashProtector &&
3004 getTarget().isSPRegName(Clobber)) {
3005 CGM.getDiags().Report(S.getAsmLoc(),
3006 diag::warn_stack_clash_protection_inline_asm);
3007 }
3008 }
3009
3010 if (isa<MSAsmStmt>(&S)) {
3011 if (Clobber == "eax" || Clobber == "edx") {
3012 if (Constraints.find("=&A") != std::string::npos)
3013 continue;
3014 std::string::size_type position1 =
3015 Constraints.find("={" + Clobber.str() + "}");
3016 if (position1 != std::string::npos) {
3017 Constraints.insert(position1 + 1, "&");
3018 continue;
3019 }
3020 std::string::size_type position2 = Constraints.find("=A");
3021 if (position2 != std::string::npos) {
3022 Constraints.insert(position2 + 1, "&");
3023 continue;
3024 }
3025 }
3026 }
3027 if (!Constraints.empty())
3028 Constraints += ',';
3029
3030 Constraints += "~{";
3031 Constraints += Clobber;
3032 Constraints += '}';
3033 }
3034
3035 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3036 "unwind clobber can't be used with asm goto");
3037
3038 // Add machine specific clobbers
3039 std::string_view MachineClobbers = getTarget().getClobbers();
3040 if (!MachineClobbers.empty()) {
3041 if (!Constraints.empty())
3042 Constraints += ',';
3043 Constraints += MachineClobbers;
3044 }
3045
3046 llvm::Type *ResultType;
3047 if (ResultRegTypes.empty())
3048 ResultType = VoidTy;
3049 else if (ResultRegTypes.size() == 1)
3050 ResultType = ResultRegTypes[0];
3051 else
3052 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3053
3054 llvm::FunctionType *FTy =
3055 llvm::FunctionType::get(ResultType, ArgTypes, false);
3056
3057 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3058
3059 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3060 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3061 ? llvm::InlineAsm::AD_ATT
3062 : llvm::InlineAsm::AD_Intel;
3063 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3064 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3065
3066 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3067 FTy, AsmString, Constraints, HasSideEffect,
3068 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3069 std::vector<llvm::Value*> RegResults;
3070 llvm::CallBrInst *CBR;
3071 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3072 CBRRegResults;
3073 if (IsGCCAsmGoto) {
3074 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3075 EmitBlock(Fallthrough);
3076 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3077 ReadNone, InNoMergeAttributedStmt,
3078 InNoConvergentAttributedStmt, S, ResultRegTypes,
3079 ArgElemTypes, *this, RegResults);
3080 // Because we are emitting code top to bottom, we don't have enough
3081 // information at this point to know precisely whether we have a critical
3082 // edge. If we have outputs, split all indirect destinations.
3083 if (!RegResults.empty()) {
3084 unsigned i = 0;
3085 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3086 llvm::Twine SynthName = Dest->getName() + ".split";
3087 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3088 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3089 Builder.SetInsertPoint(SynthBB);
3090
3091 if (ResultRegTypes.size() == 1) {
3092 CBRRegResults[SynthBB].push_back(CBR);
3093 } else {
3094 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3095 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3096 CBRRegResults[SynthBB].push_back(Tmp);
3097 }
3098 }
3099
3100 EmitBranch(Dest);
3101 EmitBlock(SynthBB);
3102 CBR->setIndirectDest(i++, SynthBB);
3103 }
3104 }
3105 } else if (HasUnwindClobber) {
3106 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3107 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3108 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3109 InNoConvergentAttributedStmt, S, ResultRegTypes,
3110 ArgElemTypes, *this, RegResults);
3111 } else {
3112 llvm::CallInst *Result =
3113 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3114 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3115 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3116 InNoConvergentAttributedStmt, S, ResultRegTypes,
3117 ArgElemTypes, *this, RegResults);
3118 }
3119
3120 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3121 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3122 ResultRegIsFlagReg);
3123
3124 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3125 // different insertion point; one for each indirect destination and with
3126 // CBRRegResults rather than RegResults.
3127 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3128 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3129 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3130 Builder.SetInsertPoint(Succ, --(Succ->end()));
3131 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3132 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3133 ResultTypeRequiresCast, ResultRegIsFlagReg);
3134 }
3135 }
3136}
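// Rough illustration (not part of the source): for an asm goto with an
// output,
//   int out;
//   asm goto("..." : "=r"(out) : : : err);
// the callbr's fallthrough stores the result from RegResults, and every
// indirect destination gets a synthesized ".split" block that stores the
// same result from CBRRegResults, because the output is only defined on the
// edges the callbr actually takes.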
3137
3138LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3139 const RecordDecl *RD = S.getCapturedRecordDecl();
3140 QualType RecordTy = getContext().getRecordType(RD);
3141
3142 // Initialize the captured struct.
3143 LValue SlotLV =
3144 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3145
3146 RecordDecl::field_iterator CurField = RD->field_begin();
3147 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3148 E = S.capture_init_end();
3149 I != E; ++I, ++CurField) {
3150 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3151 if (CurField->hasCapturedVLAType()) {
3152 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3153 } else {
3154 EmitInitializerForField(*CurField, LV, *I);
3155 }
3156 }
3157
3158 return SlotLV;
3159}
3160
3161/// Generate an outlined function for the body of a CapturedStmt, store any
3162/// captured variables into the captured struct, and call the outlined function.
3163llvm::Function *
3164CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3165 LValue CapStruct = InitCapturedStruct(S);
3166
3167 // Emit the CapturedDecl
3168 CodeGenFunction CGF(CGM, true);
3169 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3170 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3171 delete CGF.CapturedStmtInfo;
3172
3173 // Emit call to the helper function.
3174 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3175
3176 return F;
3177}
3178
3179Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3180 LValue CapStruct = InitCapturedStruct(S);
3181 return CapStruct.getAddress();
3182}
3183
3184/// Creates the outlined function for a CapturedStmt.
3185llvm::Function *
3186CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3187 assert(CapturedStmtInfo &&
3188 "CapturedStmtInfo should be set when generating the captured function");
3189 const CapturedDecl *CD = S.getCapturedDecl();
3190 const RecordDecl *RD = S.getCapturedRecordDecl();
3191 SourceLocation Loc = S.getBeginLoc();
3192 assert(CD->hasBody() && "missing CapturedDecl body");
3193
3194 // Build the argument list.
3195 ASTContext &Ctx = CGM.getContext();
3196 FunctionArgList Args;
3197 Args.append(CD->param_begin(), CD->param_end());
3198
3199 // Create the function declaration.
3200 const CGFunctionInfo &FuncInfo =
3201 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3202 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3203
3204 llvm::Function *F =
3205 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3206 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3207 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3208 if (CD->isNothrow())
3209 F->addFnAttr(llvm::Attribute::NoUnwind);
3210
3211 // Generate the function.
3212 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3213 CD->getBody()->getBeginLoc());
3214 // Set the context parameter in CapturedStmtInfo.
3215 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3216 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3217
3218 // Initialize variable-length arrays.
3219 LValue Base = MakeNaturalAlignRawAddrLValue(
3220 CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
3221 for (auto *FD : RD->fields()) {
3222 if (FD->hasCapturedVLAType()) {
3223 auto *ExprArg =
3224 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3225 .getScalarVal();
3226 auto VAT = FD->getCapturedVLAType();
3227 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3228 }
3229 }
3230
3231 // If 'this' is captured, load it into CXXThisValue.
3232 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3233 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3234 LValue ThisLValue = EmitLValueForField(Base, FD);
3235 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3236 }
3237
3238 PGO.assignRegionCounters(GlobalDecl(CD), F);
3239 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3240 FinishFunction(CD->getBodyRBrace());
3241
3242 return F;
3243}
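// Illustrative sketch (attributes elided): the helper generated above has
// internal linkage and receives the captured record as its context parameter,
//
//   define internal void @__captured_stmt(ptr noundef %__context) {
//     ; reload VLA bounds and, if captured, 'this' from %__context,
//     ; then emit the body of the CapturedStmt
//   }
//
// where __context is CD->getContextParam().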
3244
3245// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3246// nullptr otherwise.
3247static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3248 for (auto &I : *BB) {
3249 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3250 return CI;
3251 }
3252 return nullptr;
3253}
3254
3255llvm::CallBase *
3256CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3257 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3258 assert(ParentToken);
3259
3260 llvm::Value *bundleArgs[] = {ParentToken};
3261 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3262 auto *Output = llvm::CallBase::addOperandBundle(
3263 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3264 Input->replaceAllUsesWith(Output);
3265 Input->eraseFromParent();
3266 return Output;
3267}
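// Illustrative sketch: operand bundles cannot be added to an existing call in
// place, so addConvergenceControlToken recreates the instruction and replaces
// the original:
//
//   before:  %r = call i32 @g()
//   after:   %r = call i32 @g() [ "convergencectrl"(token %parent.tok) ]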
3268
3269llvm::ConvergenceControlInst *
3270CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3271 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3272 if (BB->empty())
3273 Builder.SetInsertPoint(BB);
3274 else
3275 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3276
3277 llvm::CallBase *CB = Builder.CreateIntrinsic(
3278 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3279 Builder.restoreIP(IP);
3280
3281 CB = addConvergenceControlToken(CB);
3282 return cast<llvm::ConvergenceControlInst>(CB);
3283}
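// Illustrative sketch: emitConvergenceLoopToken leaves the loop intrinsic at
// the top of the loop header, tied to the enclosing token on the stack:
//
//   loop.header:
//     %loop.tok = call token @llvm.experimental.convergence.loop()
//                     [ "convergencectrl"(token %parent.tok) ]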
3284
3285llvm::ConvergenceControlInst *
3286CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3287 llvm::BasicBlock *BB = &F->getEntryBlock();
3288 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3289 if (Token)
3290 return Token;
3291
3292 // Adding a convergence token requires the function to be marked as
3293 // convergent.
3294 F->setConvergent();
3295
3296 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3297 Builder.SetInsertPoint(&BB->front());
3298 llvm::CallBase *I = Builder.CreateIntrinsic(
3299 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3300 assert(isa<llvm::IntrinsicInst>(I));
3301 Builder.restoreIP(IP);
3302
3303 return cast<llvm::ConvergenceControlInst>(I);
3304}
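// Illustrative sketch: taken together, the routines above give a convergent
// function body the shape
//
//   define void @f() convergent {
//   entry:
//     %entry.tok = call token @llvm.experimental.convergence.entry()
//     br label %loop
//   loop:
//     %loop.tok = call token @llvm.experimental.convergence.loop()
//                     [ "convergencectrl"(token %entry.tok) ]
//     call void @g() [ "convergencectrl"(token %loop.tok) ]
//     ...
//   }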
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:2377
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:2048
static llvm::ConvergenceControlInst * getConvergenceToken(llvm::BasicBlock *BB)
Definition: CGStmt.cpp:3247
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition: CGStmt.cpp:2645
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition: CGStmt.cpp:2102
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition: CGStmt.cpp:2471
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:2324
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition: CGStmt.cpp:1473
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition: CGStmt.cpp:1893
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
Definition: CGStmt.cpp:2560
static bool hasEmptyLoopBody(const LoopStmt &S)
Definition: CGStmt.cpp:1006
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition: CGStmt.cpp:1892
@ CSFC_Failure
Definition: CGStmt.cpp:1892
@ CSFC_Success
Definition: CGStmt.cpp:1892
@ CSFC_FallThrough
Definition: CGStmt.cpp:1892
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, bool NoConvergent, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition: CGStmt.cpp:2498
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition: APValue.cpp:953
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
SourceManager & getSourceManager()
Definition: ASTContext.h:741
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
QualType getRecordType(const RecordDecl *Decl) const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualType according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2482
CanQualType VoidTy
Definition: ASTContext.h:1160
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3127
Attr - This represents one attribute.
Definition: Attr.h:43
Represents an attribute applied to a statement.
Definition: Stmt.h:2107
BreakStmt - This represents a break.
Definition: Stmt.h:3007
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
Expr * getCallee()
Definition: Expr.h:3024
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4673
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4735
bool isNothrow() const
Definition: Decl.cpp:5466
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4752
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4750
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:5463
This captures a statement into a function.
Definition: Stmt.h:3784
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3948
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1429
CaseStmt - Represent a case statement.
Definition: Stmt.h:1828
Stmt * getSubStmt()
Definition: Stmt.h:1945
Expr * getLHS()
Definition: Stmt.h:1915
Expr * getRHS()
Definition: Stmt.h:1927
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:856
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:913
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:164
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallingConv getASTCallingConvention() const
getASTCallingConvention() - Return the AST-specified calling convention.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:717
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitGotoStmt(const GotoStmt &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitIfStmt(const IfStmt &S)
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void EmitOMPReverseDirective(const OMPReverseDirective &S)
static bool hasScalarEvaluationKind(QualType T)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
void EmitContinueStmt(const ContinueStmt &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cast...
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitAttributedStmt(const AttributedStmt &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
const TargetInfo & getTarget() const
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitLabelStmt(const LabelStmt &S)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void EmitSwitchStmt(const SwitchStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
const TargetCodeGenInfo & getTargetHooks() const
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitDeclStmt(const DeclStmt &S)
void EmitOMPScopeDirective(const OMPScopeDirective &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
void EmitOMPTileDirective(const OMPTileDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
llvm::Type * ConvertType(QualType T)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
const CGFunctionInfo * CurFnInfo
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitCoreturnStmt(const CoreturnStmt &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:53
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition: CGCall.cpp:1630
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:679
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
Definition: EHScopeStack.h:118
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:359
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:364
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
LValue - This represents an lvalue references.
Definition: CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition: CGValue.h:361
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:834
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
bool isAggregate() const
Definition: CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:78
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:204
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:198
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1628
Stmt *const * const_body_iterator
Definition: Stmt.h:1700
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
ContinueStmt - This represents a continue.
Definition: Stmt.h:2977
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2369
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
ValueDecl * getDecl()
Definition: Expr.h:1333
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1519
T * getAttr() const
Definition: DeclBase.h:576
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:1064
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:1082
SourceLocation getLocation() const
Definition: DeclBase.h:442
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1493
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2752
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3117
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3086
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3587
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3033
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2808
const Expr * getSubExpr() const
Definition: Expr.h:1057
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:4321
CallingConv getCallConv() const
Definition: Type.h:4654
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:3286
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2889
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2165
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2928
Represents the declaration of a label.
Definition: Decl.h:503
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2058
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:499
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:697
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
If a crash happens while one of these objects is live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition: Type.h:929
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
QualType getCanonicalType() const
Definition: Type.h:7983
The collection of all-type qualifiers we support.
Definition: Type.h:324
Represents a struct/union/class.
Definition: Decl.h:4148
field_range fields() const
Definition: Decl.h:4354
field_iterator field_begin() const
Definition: Decl.cpp:5092
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3046
Expr * getRetValue()
Definition: Stmt.h:3077
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:84
@ NoStmtClass
Definition: Stmt.h:87
StmtClass getStmtClass() const
Definition: Stmt.h:1380
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1323
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1324
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1325
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1327
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition: Stmt.cpp:170
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:345
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition: Stmt.cpp:162
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1959
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1325
StringRef getString() const
Definition: Expr.h:1855
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1801
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2415
Exposes information about the current target.
Definition: TargetInfo.h:220
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:839
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:701
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:742
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
Token - This structure provides full information about a lexed token.
Definition: Token.h:36
bool isVoidType() const
Definition: Type.h:8510
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8800
bool isReferenceType() const
Definition: Type.h:8204
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
Represent the declaration of a variable (in which case it is an lvalue) or a function (in which case it ...
Definition: Decl.h:671
Represents a variable declaration or definition.
Definition: Decl.h:882
StorageClass getStorageClass() const
Returns the storage class as written in the source.
Definition: Decl.h:1119
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2611
Defines the clang::TargetInfo interface.
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ SC_Register
Definition: Specifiers.h:257
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
@ CC_SwiftAsync
Definition: Specifiers.h:294
unsigned long uint64_t
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:1131
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.
Definition: TargetInfo.h:1138